Python numpy.linspace() Examples

The following code examples show how to use numpy.linspace(). They are drawn from open source Python projects.
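
For reference, numpy.linspace(start, stop, num) returns num evenly spaced values over the closed interval [start, stop]; pass endpoint=False to exclude stop. A minimal standalone illustration (not from any of the projects below):

import numpy as np

print(np.linspace(0, 1, num=5))                   # [0.   0.25 0.5  0.75 1.  ]
print(np.linspace(0, 1, num=5, endpoint=False))   # [0.  0.2 0.4 0.6 0.8]
x, step = np.linspace(0, 1, num=5, retstep=True)  # also return the spacing
print(step)                                       # 0.25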

Example 1
Project: b2ac   Author: hbldh   File: ellipse.py    MIT License
def polygonize(self, n=73):
        """Gets a approximate polygon array representing the ellipse.

        Note that the last point is the same as the first point, creating a closed
        polygon.

        :param n: The number of points to generate. Default is 73 (one vertex every 5 degrees).
        :type n: int
        :return: An [n x 2] numpy array, describing the boundary vertices of
                 the polygonized ellipse.
        :rtype: :py:class:`numpy.ndarray`

        """
        t = np.linspace(0, 2 * np.pi, num=n, endpoint=True)
        out = np.zeros((len(t), 2), dtype='float')
        out[:, 0] = (self.center_point[0] +
                     self.radii[0] * np.cos(t) * np.cos(self.rotation_angle) -
                     self.radii[1] * np.sin(t) * np.sin(self.rotation_angle))
        out[:, 1] = (self.center_point[1] +
                     self.radii[0] * np.cos(t) * np.sin(self.rotation_angle) +
                     self.radii[1] * np.sin(t) * np.cos(self.rotation_angle))
        return out 
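
A quick standalone check (not from b2ac): with endpoint=True the parameter reaches 2*pi exactly, so the first and last boundary points coincide and the polygon is closed.

import numpy as np

t = np.linspace(0, 2 * np.pi, num=73, endpoint=True)
xy = np.column_stack([np.cos(t), np.sin(t)])  # unit circle, the simplest ellipse
assert np.allclose(xy[0], xy[-1])             # first vertex equals last vertex
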
Example 2
Project: spacesense   Author: spacesense-ai   File: utils.py    GNU Lesser General Public License v3.0
def optimize_OneClassSVM(X, n):
    print('searching for optimal hyperparameters...')
    nu = np.linspace(start=1e-5, stop=1e-2, num=n)
    gamma = np.linspace(start=1e-6, stop=1e-3, num=n)
    opt_diff = 1.0
    opt_nu = None
    opt_gamma = None
    for i in range(len(nu)):
        for j in range(len(gamma)):
            classifier = svm.OneClassSVM(kernel="rbf", nu=nu[i], gamma=gamma[j])
            classifier.fit(X)
            label = classifier.predict(X)
            p = 1 - float(sum(label == 1.0)) / len(label)
            diff = math.fabs(p - nu[i])
            if diff < opt_diff:
                opt_diff = diff
                opt_nu = nu[i]
                opt_gamma = gamma[j]
    return opt_nu, opt_gamma 
Example 3
Project: xrft   Author: xgcm   File: xrft.py    MIT License
def _azimuthal_wvnum(k, l, N, nfactor):
    k = k.values
    l = l.values
    K = np.sqrt(k[np.newaxis,:]**2 + l[:,np.newaxis]**2)
    nbins = int(N/nfactor)
    if k.max() > l.max():
        ki = np.linspace(0., l.max(), nbins)
    else:
        ki = np.linspace(0., k.max(), nbins)

    kidx = np.digitize(np.ravel(K), ki)
    area = np.bincount(kidx)

    kr = np.bincount(kidx, weights=K.ravel()) / area

    return kidx, area, kr 
Example 4
Project: FRIDA   Author: LCAV   File: doa.py    MIT License
def compute_mode(self):
        """
        Pre-compute mode vectors from candidate locations (in spherical 
        coordinates).
        """
        if self.num_loc is None:
            raise ValueError('Lookup table appears to be empty. \
                Run build_lookup().')
        self.mode_vec = np.zeros((self.max_bin,self.M,self.num_loc), 
            dtype='complex64')
        if (self.nfft % 2 == 1):
            raise ValueError('Signal length must be even.')
        f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
            * 1j * 2 * np.pi
        for i in range(self.num_loc):
            p_s = self.loc[:, i]
            for m in range(self.M):
                p_m = self.L[:, m]
                if (self.mode == 'near'):
                    dist = np.linalg.norm(p_m - p_s, axis=1)
                if (self.mode == 'far'):
                    dist = np.dot(p_s, p_m)
                # tau = np.round(self.fs*dist/self.c) # discrete - jagged
                tau = self.fs * dist / self.c  # "continuous" - smoother
                self.mode_vec[:, m, i] = np.exp(f * tau) 
Example 5
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_cross_phase_2d(self, dask):
        Ny, Nx = (32, 16)
        x = np.linspace(0, 1, num=Nx, endpoint=False)
        y = np.ones(Ny)
        f = 6
        phase_offset = np.pi/2
        signal1 = np.cos(2*np.pi*f*x)  # frequency = 1/(2*pi)
        signal2 = np.cos(2*np.pi*f*x - phase_offset)
        da1 = xr.DataArray(data=signal1*y[:,np.newaxis], name='a',
                          dims=['y','x'], coords={'y':y, 'x':x})
        da2 = xr.DataArray(data=signal2*y[:,np.newaxis], name='b',
                          dims=['y','x'], coords={'y':y, 'x':x})
        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2, dim=['y','x'])

        if dask:
            da1 = da1.chunk({'x': 16})
            da2 = da2.chunk({'x': 16})
        cp = xrft.cross_phase(da1, da2, dim=['x'])
        actual_phase_offset = cp.sel(freq_x=f).values
        npt.assert_almost_equal(actual_phase_offset, phase_offset) 
Example 6
Project: StructEngPy   Author: zhuoju36   File: spectrum.py    MIT License
def __init__(self,alpha_max,Tg,xi):
        gamma=0.9+(0.05-xi)/(0.3+6*xi)
        eta1=0.02+(0.05-xi)/(4+32*xi)
        eta1=eta1 if eta1>0 else 0
        eta2=1+(0.05-xi)/(0.08+1.6*xi)
        eta2=eta2 if eta2>0.55 else 0.55
        T=np.linspace(0,6,601)
        alpha=[]
        for t in T:
            if t<0.1:
                alpha.append(np.interp(t,[0,0.1],[0.45*alpha_max,eta2*alpha_max]))
            elif t<Tg:
                alpha.append(eta2*alpha_max)
            elif t<5*Tg:
                alpha.append((Tg/t)**gamma*eta2*alpha_max)
            else:
                alpha.append((eta2*0.2**gamma-eta1*(t-5*Tg))*alpha_max)
        self.__spectrum={'T':T,'alpha':alpha} 
Example 7
Project: cplot   Author: sunchaoatmo   File: cscontour.py    GNU General Public License v3.0
def cshistplot(sample,alpha,xsample,ax,lw,label,color,shade,legend=True,hist=False,**kwargs):
  import numpy as np
  from scipy import stats
  if hist:
    hist,bin_edges=np.histogram(sample, bins='fd', range=None)
    x=(bin_edges[1:]+bin_edges[:-1])*.5
    y=hist/float(len(sample))*100.0
  else:
    x=np.linspace(xsample[0],xsample[-1],100)
    kernal=stats.gaussian_kde(sample, bw_method= "scott")
    #kernal=stats.gaussian_kde(sample, bw_method= "silverman")
    y=kernal(x)
    if max(y)<1:
      y=y*100
  ax.plot(x, y, color=color, label=label,lw=lw, **kwargs)
  if shade:
    ax.fill_between(x, 1e-12, y, facecolor=color, alpha=alpha)

  # Draw the legend here
  ax.legend(loc="best") 
Example 8
Project: OpenFermion-Cirq   Author: quantumlib   File: fermionic_simulation_test.py    Apache License 2.0
def test_weights_and_exponent(weights):
    exponents = np.linspace(-1, 1, 8)
    gates = tuple(
        ofc.QuarticFermionicSimulationGate(weights / exponent,
                                         exponent=exponent)
        for exponent in exponents)

    for g1 in gates:
        for g2 in gates:
            assert cirq.approx_eq(g1, g2, atol=1e-100)

    for i, (gate, exponent) in enumerate(zip(gates, exponents)):
        assert gate.exponent == 1
        new_exponent = exponents[-i]
        new_gate = gate._with_exponent(new_exponent)
        assert new_gate.exponent == new_exponent 
Example 9
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_fix2d():
    x = np.random.randn(5000)
    y = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.2, 0.5, 5000)

    pygram_h, __ = pygram11.fix2d(x, y, bins=bins, range=((-3, 3), (-2, 2)))
    numpy_h, __, __ = np.histogram2d(
        x, y, bins=[np.linspace(-3, 3, 26), np.linspace(-2, 2, 26)]
    )
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.fix2d(
        x, y, bins=(25, 27), range=((-3, 3), (-2, 1)), weights=w
    )
    numpy_h, __, __ = np.histogram2d(
        x, y, bins=[np.linspace(-3, 3, 26), np.linspace(-2, 1, 28)], weights=w
    )
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 10
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_numpyAPI_fix2d():
    x = np.random.randn(5000)
    y = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.2, 0.5, 5000)

    pygram_h, __ = pygram11.histogram2d(x, y, bins=bins, range=((-3, 3), (-2, 2)))
    numpy_h, __, __ = np.histogram2d(x, y, bins=bins, range=((-3, 3), (-2, 2)))
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.histogram2d(
        x, y, bins=(25, 27), range=((-3, 3), (-2, 1)), weights=w
    )
    numpy_h, __, __ = np.histogram2d(
        x, y, bins=[np.linspace(-3, 3, 26), np.linspace(-2, 1, 28)], weights=w
    )
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 11
Project: nn_framework   Author: brohrer   File: autoencoder_viz.py    MIT License
def plot_connection(self, ax_boss, x0, x1, y0, y1):
        """
        Represent the weights connecting nodes in one layer
        to nodes in the next.
        """
        weight = np.random.sample() * 2 - 1
        x = np.linspace(x0, x1, num=50)
        y = y0 + (y1 - y0) * (
            -np.cos(
                np.pi * (x - x0) / (x1 - x0)
            ) + 1) / 2
        if weight > 0:
            conn_color = self.tan
        else:
            conn_color = self.blue
        ax_boss.plot(x, y, color=conn_color, linewidth=weight) 
Example 12
Project: ros_dmp   Author: abhishek098   File: dmp_discrete.py    Apache License 2.0
def gen_centers(self):
        """Set the centre of the Gaussian basis
        functions be spaced evenly throughout run time"""

        '''x_track = self.cs.discrete_rollout()
        t = np.arange(len(x_track))*self.dt
        # choose the points in time we'd like centers to be at
        c_des = np.linspace(0, self.cs.run_time, self.n_bfs)
        self.c = np.zeros(len(c_des))
        for ii, point in enumerate(c_des):
            diff = abs(t - point)
            self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]'''

        # desired activations throughout time
        des_c = np.linspace(0, self.cs.run_time, self.n_bfs)

        self.c = np.ones(len(des_c))
        for n in range(len(des_c)):
            # finding x for desired times t
            self.c[n] = np.exp(-self.cs.ax * des_c[n]) 
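
The loop above evaluates the canonical system x = exp(-ax * t) once per center; since numpy broadcasts over arrays, an equivalent vectorized sketch (ax, run_time and n_bfs are assumed sample values) is:

import numpy as np

ax, run_time, n_bfs = 1.0, 1.0, 10
des_c = np.linspace(0, run_time, n_bfs)  # desired activation times
c = np.exp(-ax * des_c)                  # same centers as the loop, in one call
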
Example 13
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License
def plotLearningCurves(train, classifier):
    #P.show()
    X = train.values[:, 1::]
    y = train.values[:, 0]

    train_sizes, train_scores, test_scores = learning_curve(
            classifier, X, y, cv=10, n_jobs=-1, train_sizes=np.linspace(.1, 1., 10), verbose=0)

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.figure()
    plt.title("Learning Curves")
    plt.xlabel("Training samples")
    plt.ylabel("Error Rate")
    plt.ylim((0, 1))
    plt.gca().invert_yaxis()
    plt.grid()

    # Plot the average training and test score lines at each training set size
    plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Test score")
    plt.legend(loc="best")  # draw the legend after the labeled lines exist

    # Plot the std deviation as a transparent range at each training set size
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                     alpha=0.1, color="b")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                     alpha=0.1, color="r")

    # Draw the plot and reset the y-axis
    plt.draw()
    plt.gca().invert_yaxis()

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)
    classifier.fit(X_train, y_train)
    plt.show() 
Example 14
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_data_1d(request):
    """Create one dimensional test DataArray."""
    Nx = 16
    Lx = 1.0
    x = np.linspace(0, Lx, Nx)
    dx = x[1] - x[0]
    coords = None if request.param == 'nocoords' else [x]
    da = xr.DataArray(np.random.rand(Nx), coords=coords, dims=['x'])
    if request.param == 'dask':
        da = da.chunk()
    return da 
Example 15
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_cross_phase_1d(self, dask):
        N = 32
        x = np.linspace(0, 1, num=N, endpoint=False)
        f = 6
        phase_offset = np.pi/2
        signal1 = np.cos(2*np.pi*f*x)  # frequency = 1/(2*pi)
        signal2 = np.cos(2*np.pi*f*x - phase_offset)
        da1 = xr.DataArray(data=signal1, name='a', dims=['x'], coords={'x': x})
        da2 = xr.DataArray(data=signal2, name='b', dims=['x'], coords={'x': x})

        if dask:
            da1 = da1.chunk({'x': 32})
            da2 = da2.chunk({'x': 32})
        cp = xrft.cross_phase(da1, da2, dim=['x'])

        actual_phase_offset = cp.sel(freq_x=f).values
        npt.assert_almost_equal(actual_phase_offset, phase_offset)
        assert cp.name == 'a_b_phase'

        xrt.assert_equal(xrft.cross_phase(da1, da2), cp)

        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2.isel(x=0).drop('x'))

        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2.rename({'x':'y'})) 
Example 16
Project: wikilinks   Author: trovdimi   File: click_distributions.py    MIT License
def plot_counts_frequency():

    fig = plt.figure()
    ax = fig.add_subplot(111)


    category_distributions = read_pickle(HOME+'output/category_counts_distribution.obj')
    data = category_distributions['counts']
    data = [int(x[0]) for x in data]
    #to consider the edges that have zero transitions we subtract the number of transitions from the number of edges in Wikipedia
    #number_of_edges = 339463340
    #zeros = np.zeros((number_of_edges - len(data)))
    #data = np.append(zeros, data)
    #bins = [0,11]
    #bins.extend(np.linspace(100,10000))
    #data = data.extend(listofzeros)
    #print data
    hist, bin_edges = np.histogram(data, bins=10000)
    #print len(hist)
    #print len(bin_edges)
    print(hist, bin_edges)

    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.plot(bin_edges[:-1], hist, marker='o', markersize=3, markeredgecolor='none', color='#D65F5F')

    #ax.set_ylim([10**0, 10**6])
    #ax.set_xlim([10**0, 10**6])
    ax.set_xlabel('Number of transitions')
    ax.set_ylabel('Frequency')

    fig.tight_layout()
    fig.savefig( 'output/agg_counts_distributions.pdf', bbox_inches='tight') 
Example 17
Project: OpenAPS   Author: medicinexlab   File: bglomb.py    MIT License
def _get_lomb_scargle(bg_df, start_index, end_index, plot_lomb_array):
        bg_time_array, bg_value_array, bg_gap_start_time, bg_gap_end_time = _make_data_array(bg_df, start_index, end_index, 'bg')
        iob_time_array, iob_value_array, iob_gap_start_time, iob_gap_end_time = _make_data_array(bg_df, start_index, end_index, 'IOB')
        cob_time_array, cob_value_array, cob_gap_start_time, cob_gap_end_time = _make_data_array(bg_df, start_index, end_index, 'COB')

        #Keep track of the data start and end times in the array
        data_gap_start_time = bg_gap_start_time + iob_gap_start_time + cob_gap_start_time
        data_gap_end_time = bg_gap_end_time + iob_gap_end_time + cob_gap_end_time

        period = np.linspace(0, int(bg_time_array.max()), int(bg_time_array.max()) + 1) #set period to be as large as possible

        bg_lomb = _run_lomb_scargle(bg_time_array, bg_value_array, period)
        iob_lomb = _run_lomb_scargle(iob_time_array, iob_value_array, period)
        cob_lomb = _run_lomb_scargle(cob_time_array, cob_value_array, period)

        #Set all bg/cob values below zero equal to zero (iob can be negative if it is below baseline levels)
        bg_lomb[bg_lomb < 0] = 0
        cob_lomb[cob_lomb < 0] = 0

        #Plot lomb-scargle if values in the plot_lomb_array
        if len(plot_lomb_array) > 0:
            plt.clf()
            if "bg" in plot_lomb_array: _plot_lomb(period, bg_lomb, bg_time_array, bg_value_array, "BG")
            if "iob" in plot_lomb_array: _plot_lomb(period, iob_lomb, iob_time_array, iob_value_array, "IOB")
            if "cob" in plot_lomb_array: _plot_lomb(period, cob_lomb, cob_time_array, cob_value_array, "COB")
            plt.legend(loc='upper left')
            plt.show()

        return period, bg_lomb, iob_lomb, cob_lomb, data_gap_start_time, data_gap_end_time


#Class to store the lomb data 
Example 18
Project: FRIDA   Author: LCAV   File: generators.py    MIT License
def gen_sig_at_mic(sigmak2_k, phi_k, pos_mic_x,
                   pos_mic_y, omega_band, sound_speed,
                   SNR, Ns=256):
    """
    Generate the complex baseband signal received at the microphones.
    :param sigmak2_k: the variance of the circular complex Gaussian signal
                emitted by the K sources
    :param phi_k: source locations (azimuths)
    :param pos_mic_x: a vector that contains microphones' x coordinates
    :param pos_mic_y: a vector that contains microphones' y coordinates
    :param omega_band: mid-band (ANGULAR) frequency [radian/sec]
    :param sound_speed: speed of sound
    :param SNR: SNR for the received signal at microphones
    :param Ns: number of snapshots used to estimate the covariance matrix
    :return: y_mic: received (complex) signal at microphones
    """
    num_mic = pos_mic_x.size
    xk, yk = polar2cart(1, phi_k)  # source locations in cartesian coordinates
    # reshape to use broadcasting
    xk = np.reshape(xk, (1, -1), order='F')
    yk = np.reshape(yk, (1, -1), order='F')
    pos_mic_x = np.reshape(pos_mic_x, (-1, 1), order='F')
    pos_mic_y = np.reshape(pos_mic_y, (-1, 1), order='F')

    t = np.reshape(np.linspace(0, 10 * np.pi, num=Ns), (1, -1), order='F')
    K = sigmak2_k.size
    sigmak2_k = np.reshape(sigmak2_k, (-1, 1), order='F')

    # x_tilde_k size: K x length_of_t
    # circular complex Gaussian process
    x_tilde_k = np.sqrt(sigmak2_k / 2.) * (np.random.randn(K, Ns) + 1j *
                                           np.random.randn(K, Ns))
    y_mic = np.dot(np.exp(-1j * (xk * pos_mic_x + yk * pos_mic_y) / (sound_speed / omega_band)),
                   x_tilde_k * np.exp(1j * omega_band * t))
    signal_energy = linalg.norm(y_mic, 'fro') ** 2
    noise_energy = signal_energy / 10 ** (SNR * 0.1)
    sigma2_noise = noise_energy / (Ns * num_mic)
    noise = np.sqrt(sigma2_noise / 2.) * (np.random.randn(*y_mic.shape) + 1j *
                                          np.random.randn(*y_mic.shape))
    y_mic_noisy = y_mic + noise
    return y_mic_noisy, y_mic 
Example 19
Project: fbpconv_tf   Author: panakino   File: layers.py    GNU General Public License v3.0
def _load_whole_data(current_version, file, dump_folder):
    hashstr = hashlib.sha256((
        "".join(file)+current_version
        ).encode()).hexdigest()
    dump_file=os.path.join(dump_folder,hashstr+".h5")
    print(dump_file)
    rebuild_data=True
    if os.path.exists(dump_file):
        print("dump file existed")
        with h5py.File(dump_file,"r") as h5file:
           if "version" in list(h5file.keys()):
               if h5file["version"][()] == current_version:  # .value was removed in h5py 3
                       rebuild_data=False

    print("rebuild_data",rebuild_data)
    if rebuild_data:
        data=[]
        mat_contents=sio.loadmat(file)
        gt=np.squeeze(mat_contents['data_gt'])
        sparse=np.zeros_like(gt)
        full=np.zeros_like(gt)

        for ind in range(gt.shape[2]):
            img = np.squeeze(gt[:,:,ind])
            theta = np.linspace(0., 180., 1000, endpoint=False)  # num must be an integer
            sinogram = radon(img, theta=theta, circle=False)
            theta_down = theta[0:1000:20]
            sparse[:,:,ind] =iradon(sinogram[:,0:1000:20],theta=theta_down,circle=False)
            full[:,:,ind] =iradon(sinogram,theta=theta,circle=False)
            print("iteration : " , ind, "/", gt.shape[2])

        norm_val=np.amax(sparse)
        print("norm_val", norm_val)
        print("finished rebuild")
        saveh5({"label":full/norm_val*255.,"sparse":sparse/norm_val*255.,"version":current_version},dump_file)

    f_handle=h5py.File(dump_file,"r")
    label=np.array(f_handle["label"])
    sparse=np.array(f_handle["sparse"])
    print("size of label, " , label.shape)
    print("size of sparse, " , sparse.shape) 
Example 20
Project: RandomFourierFeatures   Author: tiskw   File: sample_rff_regression.py    MIT License
def main():

    ### Fix seed for random fourier feature calculation
    pyrff.seed(111)

    ### Prepare training data
    Xs_train = np.linspace(0, 3, 21).reshape((21, 1))
    ys_train = np.sin(Xs_train**2)
    Xs_test  = np.linspace(0, 3, 101).reshape((101, 1))
    ys_test  = np.sin(Xs_test**2)

    ### Create classifier instance
    reg = pyrff.RFFRegression(dim_output = 8, std = 0.5)

    ### Train regression with random fourier features
    reg.fit(Xs_train, ys_train)

    ### Conduct prediction for the test data
    predict = reg.predict(Xs_test)

    ### Plot regression results
    mpl.figure(0)
    mpl.title("Regression for function y = sin(x^2) with RFF")
    mpl.xlabel("X")
    mpl.ylabel("Y")
    mpl.plot(Xs_train, ys_train, "o")
    mpl.plot(Xs_test,  ys_test,  ".")
    mpl.plot(Xs_test,  predict,  "-")
    mpl.legend(["Training data", "Test data", "Prediction by RFF regression"])
    mpl.grid()
    mpl.show() 
Example 21
Project: voice-recognition   Author: golabies   File: signal_fft.py    MIT License
def my_ft(self):
        mft = fft(self.signal)
        freq = np.linspace(-self.fs / 2, self.fs / 2, len(self.signal))
        mft = mft[freq >= 0]
        freq = freq[freq >= 0]
        mft = mft[::-1]
        mft[2:] = 2 * mft[2:]
        self.freq = freq
        self.out_put = abs(mft) / len(self.signal)
        return self.freq, self.out_put 
Example 22
Project: cplot   Author: sunchaoatmo   File: taylorDiagram.py    GNU General Public License v3.0
def add_contours(self, levels=5, **kwargs):
        """Add constant centered RMS difference contours."""

        rs,ts = NP.meshgrid(NP.linspace(self.smin,self.smax),
                            NP.linspace(0,NP.pi/2.0))
        # Compute centered RMS difference
        rms = NP.sqrt(self.refstd**2 + rs**2 - 2*self.refstd*rs*NP.cos(ts))
        
        contours = self.ax.contour(ts, rs, rms, levels, linewidths=0.5, linestyles='--', zorder=0, **kwargs)
        #contours = self.ax.contour(ts, rs, rms, levels,linewidths=0.01, **kwargs)

        return contours 
Example 23
Project: skylab   Author: coenders   File: utils.py    GNU General Public License v3.0
def fit(self, data):
        r"""Perform fit given `data`.

        Parameters
        ----------
        data : array_like
            Test statistic values

        Returns
        -------
        delta_exp_frozen
            Probability density function

        """
        data = np.asarray(data)

        # Get amount of over-fluctuations.
        eta = float(np.count_nonzero(data > 0.)) / len(data)
        eta_err = np.sqrt(eta * (1. - eta) / len(data))

        # Sort data and construct cumulative distribution.
        x = np.sort(data[data > 0.])
        y = np.linspace(1., 0., len(x) + 1)[:-1]

        coeff = np.polyfit(x, np.log(y), self.deg)

        return delta_exp(coeff, eta, eta_err) 
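
The line y = np.linspace(1., 0., len(x) + 1)[:-1] assigns each sorted test-statistic value its empirical survival fraction. A toy check (not from skylab):

import numpy as np

x = np.sort(np.random.exponential(size=5))
y = np.linspace(1., 0., len(x) + 1)[:-1]
print(y)  # [1.  0.8 0.6 0.4 0.2]: the fraction of the data >= each sorted value
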
Example 24
Project: skylab   Author: coenders   File: utils.py    GNU General Public License v3.0
def skymap(plt, vals, **kwargs):
    fig, ax = plt.subplots(subplot_kw=dict(projection="aitoff"))

    gridsize = 1000

    x = np.linspace(np.pi, -np.pi, 2 * gridsize)
    y = np.linspace(np.pi, 0., gridsize)

    X, Y = np.meshgrid(x, y)

    r = hp.rotator.Rotator(rot=(-180., 0., 0.))

    YY, XX = r(Y.ravel(), X.ravel())

    pix = hp.ang2pix(hp.npix2nside(len(vals)), YY, XX)

    Z = np.reshape(vals[pix], X.shape)

    lon = x[::-1]
    lat = np.pi / 2. - y

    cb = kwargs.pop("colorbar", dict())
    cb.setdefault("orientation", "horizontal")
    cb.setdefault("fraction", 0.075)

    title = cb.pop("title", None)

    p = ax.pcolormesh(lon, lat, Z, **kwargs)

    cbar = fig.colorbar(p, **cb)

    cbar.solids.set_edgecolor("face")
    cbar.update_ticks()
    if title is not None:
        cbar.set_label(title)

    ax.xaxis.set_ticks([])

    return fig, ax 
Example 25
Project: deep-learning-note   Author: wdxtub   File: utils.py    MIT License
def train_opt(optimizer_fn, states, hyperparams, features, labels,
              batch_size=10, num_epochs=2):
    # initialize the model
    net, loss = linreg, squared_loss

    w = torch.nn.Parameter(torch.tensor(np.random.normal(0, 0.01, size=(features.shape[1], 1)), dtype=torch.float32),
                           requires_grad=True)
    b = torch.nn.Parameter(torch.zeros(1, dtype=torch.float32), requires_grad=True)

    def eval_loss():
        return loss(net(features, w, b), labels).mean().item()

    ls = [eval_loss()]
    data_iter = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features, labels), batch_size, shuffle=True)

    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            l = loss(net(X, w, b), y).mean()  # use the mean loss

            # zero the gradients
            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()

            l.backward()
            optimizer_fn([w, b], states, hyperparams)  # update the model parameters
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())  # record the training loss every 100 samples
    # print the result and plot
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()


# Unlike the original book, the first argument here is the optimizer function rather than the optimizer's name
# e.g.: optimizer_fn=torch.optim.SGD, optimizer_hyperparams={"lr": 0.05}
Example 26
Project: deep-learning-note   Author: wdxtub   File: utils.py    MIT License
def train_opt_pytorch(optimizer_fn, optimizer_hyperparams, features, labels,
                      batch_size=10, num_epochs=2):
    # initialize the model
    net = nn.Sequential(
        nn.Linear(features.shape[-1], 1)
    )
    loss = nn.MSELoss()
    optimizer = optimizer_fn(net.parameters(), **optimizer_hyperparams)

    def eval_loss():
        return loss(net(features).view(-1), labels).item() / 2

    ls = [eval_loss()]
    data_iter = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features, labels), batch_size, shuffle=True)

    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            # divide by 2 to stay consistent with train_ch7, since squared_loss divides by 2
            l = loss(net(X).view(-1), y) / 2

            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())
    # print the result and plot
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show() 
Example 27
Project: neural-pipeline   Author: toodef   File: tensorboard.py    MIT License
def update_losses(self, losses: {}) -> None:
        """
        Update monitor

        :param losses: losses values with keys 'train' and 'validation'
        """
        if self.__writer is None:
            return

        def on_loss(name: str, values: np.ndarray) -> None:
            self.__writer.add_scalars('loss', {name: np.mean(values)}, global_step=self.epoch_num)
            self.__writer.add_histogram('{}/loss_hist'.format(name), np.clip(values, -1, 1).astype(np.float32),
                                        global_step=self.epoch_num, bins=np.linspace(-1, 1, num=11).astype(np.float32))

        self._iterate_by_losses(losses, on_loss) 
Example 28
Project: Scene-Understanding   Author: foamliu   File: class_rebal.py    MIT License
def smooth_class_prior(sigma=5, do_plot=False):
    prior_prob = np.load(os.path.join(data_dir, "prior_prob.npy"))
    # add an epsilon to prior prob to avoid 0 values and possible NaN
    prior_prob += 1E-3 * np.min(prior_prob)
    # renormalize
    prior_prob = prior_prob / (1.0 * np.sum(prior_prob))

    # Smooth with gaussian
    f = interp1d(np.arange(prior_prob.shape[0]), prior_prob)
    xx = np.linspace(0, prior_prob.shape[0] - 1, 1000)
    yy = f(xx)
    window = gaussian(2000, sigma)  # 2000 pts in the window, sigma=5
    smoothed = convolve(yy, window / window.sum(), mode='same')
    fout = interp1d(xx, smoothed)
    prior_prob_smoothed = np.array([fout(i) for i in range(prior_prob.shape[0])])
    prior_prob_smoothed = prior_prob_smoothed / np.sum(prior_prob_smoothed)

    # Save
    file_name = os.path.join(data_dir, "prior_prob_smoothed.npy")
    np.save(file_name, prior_prob_smoothed)

    if do_plot:
        plt.plot(prior_prob)
        plt.plot(prior_prob_smoothed, "g--")
        plt.plot(xx, smoothed, "r-")
        plt.yscale("log")
        plt.show() 
Example 29
Project: synthetic-data-tutorial   Author: theodi   File: SocialSecurityNumberAttribute.py    MIT License
def generate_values_as_candidate_key(self, n):
        if n < 1e9:
            values = np.linspace(0, 1e9 - 1, num=n, dtype=int)
            values = np.random.permutation(values)
            values = [str(i).zfill(9) for i in values]
            return ['{}-{}-{}'.format(i[:3], i[3:5], i[5:]) for i in values]
        else:
            raise Exception('The candidate key "{}" cannot generate more than 1e9 distinct values.'.format(self.name))
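
Because the n values are evenly spaced over a range much wider than n, they remain distinct after truncation to int. A small-scale sketch of the same idea:

import numpy as np

values = np.linspace(0, 1e9 - 1, num=5, dtype=int)
print(values)  # [0 249999999 499999999 749999999 999999999], all distinct
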
Example 30
Project: spacesense   Author: spacesense-ai   File: classification.py    GNU Lesser General Public License v3.0
def build_model(self, X_train, n=10):
        nu = np.linspace(start=1e-5, stop=1e-2, num=n)
        gamma = np.linspace(start=1e-6, stop=1e-3, num=n)
        opt_diff = 1.0
        opt_nu = None
        opt_gamma = None
        nu_opt, gamma_opt = optimize_OneClassSVM(X_train, n)
        self.svc_models = svm.OneClassSVM(nu=nu_opt, kernel='rbf', gamma=gamma_opt) 
Example 31
Project: helloworld   Author: pip-uninstaller-python   File: matplotlibTest.py    GNU General Public License v2.0
def main():
    # line
    x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    c, s = np.cos(x), np.sin(x)
    plt.figure(1)
    plt.plot(x, c, color="blue", linewidth=1.0, linestyle="-", label="COS", alpha=0.5)  # independent vs. dependent variable
    plt.plot(x, s, "r.", label="SIN")  # sine; format strings like "-"/"r-"/"r."
    plt.title("COS & SIN")
    ax = plt.gca()
    ax.spines["right"].set_color("none")
    ax.spines["top"].set_color("none")
    ax.spines["left"].set_position(("data", 0))  # horizontal-axis position
    ax.spines["bottom"].set_position(("data", 0))  # vertical-axis position
    ax.xaxis.set_ticks_position("bottom")
    ax.yaxis.set_ticks_position("left")
    plt.xticks([-np.pi, -np.pi / 2.0, 0, np.pi / 2, np.pi],
               [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
    plt.yticks(np.linspace(-1, 1, 5, endpoint=True))
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontsize(16)
        label.set_bbox(dict(facecolor="white", edgecolor="None", alpha=0.2))
    plt.legend(loc="upper left")  # show the legend in the upper-left corner
    plt.grid()  # grid lines
    # plt.axis([-1, 1, -0.5, 1])  # display range
    plt.fill_between(x, np.abs(x) < 0.5, c, c < 0.5, color="green", alpha=0.25)
    t = 1
    plt.plot([t, t], [0, np.cos(t)], "y", linewidth=3, linestyle="--")
    # annotation
    plt.annotate("cos(1)", xy=(t, np.cos(1)), xycoords="data", xytext=(+10, +30),
                 textcoords="offset points", arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.2"))
    plt.show()


# Scatter --> scatter plot
Example 32
Project: numpynet   Author: uptake   File: examples.py    BSD 3-Clause "New" or "Revised" License
def plot_activations():
    for activation in common.Activation.available:
        x = np.linspace(-10.0, 10.0, 100)
        y = common.Activation(activation).function(x, deriv=False)
        dy = common.Activation(activation).function(x, deriv=True)
        viz_client.plot_func(x, y, title=activation)
        viz_client.plot_func(x, dy, title="d_" + activation)


# TODO write this! 
Example 33
Project: gullikson-scripts   Author: kgullikson88   File: SpectralTypeRelations.py    MIT License
def get_color(self, fv, temperature, search_range='valid'):
        """
        Get the color, given the temperature (root-finding)
        :param fv: The FitVals object to use. Should be one of the self.color_relations
        :param temperature: The temperature for which you want a color
        :param search_range: The range of colors to search. The default is the full valid range of the fit.
                             You can extend it if you want by giving a list-like object, but it will give
                             you a warning if the best fit is an extrapolation.
        :return: The color corresponding to the requested temperature
        """
        from kglib.utils import HelperFunctions
        # Determine the test values from search_range
        if isinstance(search_range, str) and search_range.lower() == 'valid':
            test_values = np.linspace(fv.valid[0], fv.valid[1], 1000)
        else:
            test_values = np.linspace(search_range[0], search_range[1], 1000)

        # Evaluate the function at each of the test colors
        test_temperatures = self.evaluate(fv, test_values, is_spt=False)

        # Determine the 'best fit' solution
        temperature = np.array(temperature)
        differences = (temperature.reshape(1, -1) - test_temperatures.reshape(-1, 1))
        idx = np.abs(differences).argmin(axis=0)
        color = test_values[idx]

        # Check if the best-fit solution is an extrapolation
        if HelperFunctions.IsListlike(search_range):
            if HelperFunctions.IsListlike(color):
                if not all([fv.valid[0] < c < fv.valid[1] for c in color]):
                    logging.warn('Best-fit color is an extrapolation from the valid range. Be very careful!')
            elif not fv.valid[0] < color < fv.valid[1]:
                logging.warn('Best-fit color is an extrapolation from the valid range. Be very careful!')
        return color 
Example 34
Project: ANN   Author: waynezv   File: ANN_large_v23.py    MIT License
def add_dist_prior(input):
    num_lines = 128
    d_theta = 0.7/180*np.pi
    i_start = 63
    theta_s = -num_lines/2*d_theta + (i_start-1)*d_theta
    theta_e = theta_s + 67*d_theta
    theta = np.arange(theta_s, theta_e, d_theta)
    x = np.linspace(-50,50,32)/1000
    z = np.linspace(20,80,32)/1000
    x = np.repeat(x.reshape(1,32),32,0)
    z = np.repeat(z.reshape(32,1),32,1)
    fc = 5e6
    c = 1540
    l = c/fc
    ele_width = l/2
    kerf = 0.0025/1000
    pitch = ele_width+kerf
    tx_pos_x = np.linspace(-pitch/2-31*pitch, pitch/2+31*pitch, 64)
    tx_pos_z = np.zeros((64,))
    dist = np.matrix(np.zeros((64,1024)))
    for i in range(64):
        dist[i,:] =np.sqrt( \
                (tx_pos_x[i]-x)**2 + \
                (tx_pos_z[i]-z)**2 \
                ).reshape(-1)
    ns, nch, nr, nc = input.shape
    input_new = np.zeros((ns,nch,nr,nc))
    for nsi in range(ns):
        for nchi in range(nch):
            tmp = np.matrix(input[nsi,nchi,:,:])
            input_new[nsi,nchi,:,:] = \
                   tmp*dist*dist.T
    return input_new 
Example 35
Project: ANN   Author: waynezv   File: ANN_large_v22.py    MIT License
def add_dist_prior(input):
    num_lines = 128
    d_theta = 0.7/180*np.pi
    i_start = 63
    theta_s = -num_lines/2*d_theta + (i_start-1)*d_theta
    theta_e = theta_s + 67*d_theta
    theta = np.arange(theta_s, theta_e, d_theta)
    x = np.linspace(-50,50,32)/1000
    z = np.linspace(20,80,32)/1000
    x = np.repeat(x.reshape(1,32),32,0)
    z = np.repeat(z.reshape(32,1),32,1)
    fc = 5e6
    c = 1540
    l = c/fc
    ele_width = l/2
    kerf = 0.0025/1000
    pitch = ele_width+kerf
    tx_pos_x = np.linspace(-pitch/2-31*pitch, pitch/2+31*pitch, 64)
    tx_pos_z = np.zeros((64,))
    dist = np.matrix(np.zeros((64,1024)))
    for i in range(64):
        dist[i,:] =np.sqrt( \
                (tx_pos_x[i]-x)**2 + \
                (tx_pos_z[i]-z)**2 \
                ).reshape(-1)
    ns, nch, nr, nc = input.shape
    input_new = np.zeros((ns,nch,nr,nc))
    for nsi in range(ns):
        for nchi in range(nch):
            tmp = np.matrix(input[nsi,nchi,:,:])
            input_new[nsi,nchi,:,:] = \
                   tmp*dist*dist.T
    return input_new 
Example 36
Project: ANN   Author: waynezv   File: ANN_large_v24.py    MIT License
def add_dist_prior(input):
    num_lines = 128
    d_theta = 0.7/180*np.pi
    i_start = 63
    theta_s = -num_lines/2*d_theta + (i_start-1)*d_theta
    theta_e = theta_s + 67*d_theta
    theta = np.arange(theta_s, theta_e, d_theta)
    x = np.linspace(-50,50,32)/1000
    z = np.linspace(20,80,32)/1000
    x = np.repeat(x.reshape(1,32),32,0)
    z = np.repeat(z.reshape(32,1),32,1)
    fc = 5e6
    c = 1540
    l = c/fc
    ele_width = l/2
    kerf = 0.0025/1000
    pitch = ele_width+kerf
    tx_pos_x = np.linspace(-pitch/2-31*pitch, pitch/2+31*pitch, 64)
    tx_pos_z = np.zeros((64,))
    dist = np.matrix(np.zeros((64,1024)))
    for i in range(64):
        dist[i,:] =np.sqrt( \
                (tx_pos_x[i]-x)**2 + \
                (tx_pos_z[i]-z)**2 \
                ).reshape(-1)
    ns, nch, nr, nc = input.shape
    input_new = np.zeros((ns,nch,nr,nc))
    for nsi in range(ns):
        for nchi in range(nch):
            tmp = np.matrix(input[nsi,nchi,:,:])
            input_new[nsi,nchi,:,:] = \
                   tmp*dist*dist.T
    return input_new 
Example 37
Project: featkit   Author: ryadzenine   File: numerical.py    MIT License
def fit(self, X, y=None):
        self.pc = np.percentile(X, np.linspace(100 / self.nb_bins, 100 - 100 / self.nb_bins, self.nb_bins))
        return self 
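
The fit stores percentile cut points over the interior of the distribution; a quick illustration with an assumed nb_bins:

import numpy as np

nb_bins = 4
X = np.random.randn(1000)
pc = np.percentile(X, np.linspace(100 / nb_bins, 100 - 100 / nb_bins, nb_bins))
print(pc)  # cut points at the 25th, 41.7th, 58.3rd and 75th percentiles
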
Example 38
Project: kitti-object-eval-python   Author: traveller59   File: eval.py    MIT License
def do_coco_style_eval(gt_annos,
                       dt_annos,
                       current_classes,
                       overlap_ranges,
                       compute_aos,
                       z_axis=1,
                       z_center=1.0):
    # overlap_ranges: [range, metric, num_class]
    min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
    for i in range(overlap_ranges.shape[1]):
        for j in range(overlap_ranges.shape[2]):
            min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
    mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval_v2(
        gt_annos,
        dt_annos,
        current_classes,
        min_overlaps,
        compute_aos,
        z_axis=z_axis,
        z_center=z_center)
    # ret: [num_class, num_diff, num_minoverlap]
    mAP_bbox = mAP_bbox.mean(-1)
    mAP_bev = mAP_bev.mean(-1)
    mAP_3d = mAP_3d.mean(-1)
    if mAP_aos is not None:
        mAP_aos = mAP_aos.mean(-1)
    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos 
Example 39
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_fix1d():
    x = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.5, 1.0, 5000)

    pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3))
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26))
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), weights=w)
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), weights=w)
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
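
The equivalence tested here rests on the fact that num in np.linspace counts bin edges, so 25 fixed-width bins on (-3, 3) need 26 edges:

import numpy as np

edges = np.linspace(-3, 3, 26)
print(edges.size - 1)     # 25 bins
print(np.diff(edges)[0])  # constant bin width of 0.24
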
Example 40
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_numpyAPI_fix1d():
    x = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.8, 1, 5000)

    pygram_h, __ = pygram11.histogram(x, bins=25, range=(-3, 3))
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26))
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.histogram(x, bins=25, range=(-3, 3), weights=w)
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), weights=w)
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 41
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_fix1d_omp():
        x = np.random.randn(5000)
        bins = 25
        w = np.random.uniform(-0.2, 0.8, 5000)

        pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), omp=True)
        numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26))
        npt.assert_almost_equal(pygram_h, numpy_h, 5)

        pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), weights=w, omp=True)
        numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), weights=w)
        npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 42
Project: pygram11   Author: douglasdavis   File: test_histogram.py    MIT License
def test_density_fix1d():
    x = np.random.randn(5000)
    bins = 25
    w = np.random.uniform(0.5, 1.0, 5000)

    pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), density=True)
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), density=True)
    npt.assert_almost_equal(pygram_h, numpy_h, 5)

    pygram_h, __ = pygram11.fix1d(x, bins=25, range=(-3, 3), weights=w, density=True)
    numpy_h, __ = np.histogram(x, bins=np.linspace(-3, 3, 26), weights=w, density=True)
    npt.assert_almost_equal(pygram_h, numpy_h, 5) 
Example 43
Project: HAPI   Author: MAfarrag   File: DistParameters.py    MIT License
def calculateK(x,position,UB,LB):
    """
    ===================================================
        calculateK(x,position,UB,LB):
    ===================================================
        
    this function takes the value of the x parameter and generates 101 evenly
    spaced values of the k parameter between the upper and lower constraints;
    the output is the value corresponding to the given position

    Inputs:
    ----------
        1- x weighting coefficient to determine the linearity of the water surface
            (one of the parameters of the muskingum routing method)
        2- position
            random position between the upper and lower bounds of the k parameter
        3- UB
            upper bound for the k parameter
        4- LB
            lower bound for the k parameter
    """
    
    constraint1=0.5*1/(1-x) # k has to be smaller than this constraint
    constraint2=0.5*1/x   # k has to be greater than this constraint
    
    if constraint2 >= UB : #if constraint is higher than UB take UB
        constraint2 =UB
        
    if constraint1 <= LB : #if constraint is lower than LB take LB
        constraint1 =LB
    
    generatedK=np.linspace(constraint1,constraint2,101)
    k=generatedK[int(round(position,0))]
    return k 
Example 44
Project: HAPI   Author: MAfarrag   File: DHBV_functions0000000000000.py    MIT License
def calculateK(x,position,UB,LB):
    '''
    calculateK(x,position,UB,LB):
        this function takes the value of the x parameter and generates 101 evenly
        spaced values of the k parameter between the upper and lower constraints;
        the output is the value corresponding to the given position

        Inputs:
            1- x weighting coefficient to determine the linearity of the water surface
                (one of the parameters of the muskingum routing method)
            2- position
                random position between the upper and lower bounds of the k parameter
            3- UB
                upper bound for the k parameter
            4- LB
                lower bound for the k parameter
    '''
    constraint1=0.5*1/(1-x) # k has to be smaller than this constraint
    constraint2=0.5*1/x   # k has to be greater than this constraint
    
    if constraint2 >= UB : #if constraint is higher than UB take UB
        constraint2 =UB
        
    if constraint1 <= LB : #if constraint is lower than LB take LB
        constraint1 =LB
    
    generatedK=np.linspace(constraint1,constraint2,101)
    k=generatedK[int(round(position,0))]
    return k 
Example 45
Project: HAPI   Author: MAfarrag   File: trial2.py    MIT License
def update_data(attrname, old, new):

    # Get the current slider values
    a = amplitude.value
    b = offset.value
    w = phase.value
    k = freq.value

    # Generate the new curve
    x = np.linspace(0, 4*np.pi, N)
    y = a*np.sin(k*x + w) + b

    source.data = dict(x=x, y=y) 
Example 46
Project: HAPI   Author: MAfarrag   File: DistParameters.py    MIT License
def calculateK(x,position,UB,LB):
    """
    ===================================================
        calculateK(x,position,UB,LB):
    ===================================================
        
    this function takes the value of the x parameter and generates 101 evenly
    spaced values of the k parameter between the upper and lower constraints;
    the output is the value corresponding to the given position

    Inputs:
    ----------
        1- x weighting coefficient to determine the linearity of the water surface
            (one of the parameters of the muskingum routing method)
        2- position
            random position between the upper and lower bounds of the k parameter
        3- UB
            upper bound for the k parameter
        4- LB
            lower bound for the k parameter
    """
    
    constraint1=0.5*1/(1-x) # k has to be smaller than this constraint
    constraint2=0.5*1/x   # k has to be greater than this constraint
    
    if constraint2 >= UB : #if constraint is higher than UB take UB
        constraint2 =UB
        
    if constraint1 <= LB : #if constraint is lower than LB take LB
        constraint1 =LB
    
    generatedK=np.linspace(constraint1,constraint2,101)
    k=generatedK[int(round(position,0))]
    return k 
Example 47
Project: HAPI   Author: MAfarrag   File: DistParameters.py    MIT License
def calculateK(x,position,UB,LB):
    """
    ===================================================
        calculateK(x,position,UB,LB):
    ===================================================
        
    this function takes the value of the x parameter and generates 101 evenly
    spaced values of the k parameter between the upper and lower constraints;
    the output is the value corresponding to the given position

    Inputs:
    ----------
        1- x weighting coefficient to determine the linearity of the water surface
            (one of the parameters of the muskingum routing method)
        2- position
            random position between the upper and lower bounds of the k parameter
        3- UB
            upper bound for the k parameter
        4- LB
            lower bound for the k parameter
    """
    
    constraint1=0.5*1/(1-x) # k has to be smaller than this constraint
    constraint2=0.5*1/x   # k has to be greater than this constraint
    
    if constraint2 >= UB : #if constraint is higher than UB take UB
        constraint2 =UB
        
    if constraint1 <= LB : #if constraint is lower than LB take LB
        constraint1 =LB
    
    generatedK=np.linspace(constraint1,constraint2,101)
    k=generatedK[int(round(position,0))]
    return k 
Example 48
Project: praktipy   Author: The-Ludwig   File: __init__.py    MIT License
def polyplotfit(x, params, N=1000, border=0.05):
    """Plots a polynome, which was fitted.
    x: the original x value which was fitted
    params: the parameters in the polynome
    N: Number of x_values to calculate
    border: percentage of x_range to make a border"""

    dx = x[-1] - x[0]
    x_fit = np.linspace(x[0] - dx*border, x[-1] + dx*border, N)
    y_fit = np.zeros(len(x_fit))
    deg = len(params)
    for i in range(deg):
        y_fit += params[deg-1-i] * x_fit**i

    return (x_fit, y_fit) 
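
The accumulation loop reads params with the highest-degree coefficient first, which matches numpy's own convention, so an equivalent sketch (with assumed sample values) can use np.polyval:

import numpy as np

params = [2.0, -1.0, 0.5]          # highest degree first, as np.polyfit returns
x_fit = np.linspace(0.0, 1.0, 5)
y_fit = np.polyval(params, x_fit)  # same result as the explicit power loop
print(y_fit)
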
Example 49
Project: praktipy   Author: The-Ludwig   File: __init__.py    MIT License
def curveplotfit(f, x, params, N=1000, border=0.05, logscale=False):
    """Plots a general function, which was fitted.
    x: the original x value which was fitted
    params: the parameters in the polynome
    N: Number of x_values to calculate
    border: percentage of x_range to make a border"""
    dx = x[-1] - x[0]
    if logscale:
        x_fit = np.logspace(np.log10(
            x[0]) - np.log10(x[0])*border*0.6, np.log10(x[-1])+np.log10(x[-1])*border*0.2, N)
    else:
        x_fit = np.linspace(x[0] - dx*border, x[-1] + dx*border, N)
    y_fit = f(x_fit, *params)

    return (x_fit, y_fit) 
Example 50
Project: praktipy   Author: The-Ludwig   File: praktiplot.py    MIT License
def polyplotfit(x, params, N=1000, border=0.05):
    """Plots a polynome, which was fitted.
    x: the original x value which was fitted
    params: the parameters in the polynome
    N: Number of x_values to calculate
    border: percentage of x_range to make a border"""

    dx = x[-1] - x[0]
    x_fit = np.linspace(x[0] - dx*border, x[-1] + dx*border, N)
    y_fit = np.zeros(len(x_fit))
    deg = len(params)
    for i in range(deg):
        y_fit += params[deg-1-i] * x_fit**i

    return (x_fit, y_fit) 
Example 51
Project: praktipy   Author: The-Ludwig   File: praktiplot.py    MIT License
def curveplotfit(f, x, params, N=1000, border=0.05, logscale=False):
    """Plots a general function, which was fitted.
    x: the original x value which was fitted
    params: the parameters in the polynome
    N: Number of x_values to calculate
    border: percentage of x_range to make a border"""
    dx = x[-1] - x[0]
    if logscale:
        x_fit = np.logspace(np.log10(
            x[0]) - np.log10(x[0])*border*0.6, np.log10(x[-1])+np.log10(x[-1])*border*0.2, N)
    else:
        x_fit = np.linspace(x[0] - dx*border, x[-1] + dx*border, N)
    y_fit = f(x_fit, *params)

    return (x_fit, y_fit) 
Example 52
Project: pysine   Author: lneuhaus   File: pysine.py    MIT License
def sine(self, frequency=440.0, duration=1.0):
        points = int(self.BITRATE * duration)
        try:
            times = np.linspace(0, duration, points, endpoint=False)
            data = np.array((np.sin(times*frequency*2*np.pi) + 1.0)*127.5, dtype=np.uint8).tobytes()  # unsigned 8-bit samples, matching the chr() fallback below
        except:  # do it without numpy
            data = ''
            omega = 2.0*pi*frequency/self.BITRATE
            for i in range(points):
                data += chr(int(127.5*(1.0+sin(float(i)*omega))))
        self.stream.write(data) 
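
Here endpoint=False makes the spacing exactly duration/points, so back-to-back tones line up without a duplicated sample. A quick check with assumed parameters:

import numpy as np

BITRATE, duration = 8000, 1.0
points = int(BITRATE * duration)
times = np.linspace(0, duration, points, endpoint=False)
print(times[1] - times[0], duration / points)  # equal spacing of 1/8000
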
Example 53
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: cocoeval.py    MIT License
def setDetParams(self):
        self.imgIds = []
        self.catIds = []
        # np.arange causes trouble.  the data point on arange is slightly larger than the true value
        self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
        self.maxDets = [1, 10, 100]
        self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
        self.areaRngLbl = ['all', 'small', 'medium', 'large']
        self.useCats = 1 
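
The comment about np.arange refers to floating-point drift: arange builds its grid by repeatedly adding a binary-inexact step, so the last element (and even the length) can come out slightly off, whereas linspace pins both endpoints and the count. A standalone comparison (not from cocoeval):

import numpy as np

thrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
print(thrs.size, thrs[-1])      # exactly 10 thresholds, last one exactly 0.95

thrs_a = np.arange(.5, 0.95 + .05, .05)
print(thrs_a.size, thrs_a[-1])  # length and final value depend on rounding
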
Example 54
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: cocoeval.py    MIT License
def setKpParams(self):
        self.imgIds = []
        self.catIds = []
        # np.arange causes trouble.  the data point on arange is slightly larger than the true value
        self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
        self.maxDets = [20]
        self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
        self.areaRngLbl = ['all', 'medium', 'large']
        self.useCats = 1 
Example 55
Project: ros_dmp   Author: abhishek098   File: dmp_rhythmic.py    Apache License 2.0
def gen_centers(self):
        """Set the centre of the Gaussian basis
        functions be spaced evenly throughout run time"""

        c = np.linspace(0, 2*np.pi, self.n_bfs+1)
        c = c[0:-1]
        self.c = c 
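
Dropping the duplicate endpoint with c[0:-1] is equivalent to asking linspace for a half-open grid directly; a standalone equivalence check:

import numpy as np

n_bfs = 8
a = np.linspace(0, 2 * np.pi, n_bfs + 1)[0:-1]
b = np.linspace(0, 2 * np.pi, n_bfs, endpoint=False)
assert np.allclose(a, b)  # same evenly spaced centers on [0, 2*pi)
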
Example 56
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_detrend():
    N = 16
    x = np.arange(N+1)
    y = np.arange(N-1)
    t = np.linspace(-int(N/2), int(N/2), N-6)
    z = np.arange(int(N/2))
    d4d = (t[:,np.newaxis,np.newaxis,np.newaxis]
            + z[np.newaxis,:,np.newaxis,np.newaxis]
            + y[np.newaxis,np.newaxis,:,np.newaxis]
            + x[np.newaxis,np.newaxis,np.newaxis,:]
          )
    da4d = xr.DataArray(d4d, dims=['time','z','y','x'],
                     coords={'time':range(len(t)),'z':range(len(z)),'y':range(len(y)),
                             'x':range(len(x))}
                     )

    func = xrft.detrend_wrap(xrft.detrendn)

    #########
    # Chunk along the `time` axis
    #########
    da = da4d.chunk({'time': 1})
    with pytest.raises(ValueError):
        func(da.data, axes=[0]).compute()
    with pytest.raises(ValueError):
        func(da.data, axes=[0,1,2,3]).compute()
    da_prime = func(da.data, axes=[2]).compute()
    npt.assert_allclose(da_prime[0,0], sps.detrend(d4d[0,0], axis=0))
    da_prime = func(da.data, axes=[1,2,3]).compute()
    npt.assert_allclose(da_prime[0],
                        xrft.detrendn(d4d[0], axes=[0,1,2]))

    #########
    # Chunk along the `time` and `z` axes
    #########
    da = da4d.chunk({'time':1, 'z':1})
    with pytest.raises(ValueError):
        func(da.data, axes=[1,2]).compute()
    with pytest.raises(ValueError):
        func(da.data, axes=[2,2]).compute()
    da_prime = func(da.data, axes=[2,3]).compute()
    npt.assert_allclose(da_prime[0,0],
                        xrft.detrendn(d4d[0,0], axes=[0,1])) 
Example 57
Project: prediction-constrained-topic-models   Author: dtak   File: utils_calibration.py    MIT License
def plot_binary_clf_calibration_curve_and_histograms(
        info_per_bin=None,
        fig_kws=dict(
            figsize=(1.4*3, 1.4*4),
            tight_layout=True),
        ):
    fig_h = plt.figure(**fig_kws)
    ax_grid = gridspec.GridSpec(
        nrows=4, ncols=1,
        height_ratios=[1, 1, 4, 0.1],
        )
    ax_cal = fig_h.add_subplot(ax_grid[2,0])
    ax_TP = fig_h.add_subplot(ax_grid[0,0])
    ax_TN = fig_h.add_subplot(ax_grid[1,0])

    # Plot calibration curve
    # First, lay down idealized line from 0-1
    unit_grid = np.linspace(0, 1, 10)
    ax_cal.plot(
        unit_grid, unit_grid, 'k--', alpha=0.5)
    # Then, plot actual-vs-expected fractions on top
    ax_cal.plot(
        info_per_bin['xcenter_per_bin'],
        info_per_bin['fracTP_per_bin'],
        'ks-')
    ax_cal.set_ylabel('frac. true positive')
    ax_cal.set_xlabel('predicted proba.')

    # Plot TP histogram
    ax_TP.bar(
        info_per_bin['xcenter_per_bin'],
        info_per_bin['countTP_per_bin'],
        width=0.9*info_per_bin['xwidth_per_bin'],
        color='b')

    # Plot TN histogram
    ax_TN.bar(
        info_per_bin['xcenter_per_bin'],
        info_per_bin['countTN_per_bin'],
        width=0.9*info_per_bin['xwidth_per_bin'],
        color='r')
    for ax in [ax_cal, ax_TP, ax_TN]:
        ax.set_xlim([0, 1])
    ax_cal.set_ylim([0, 1]) 
Example 58
Project: prediction-constrained-topic-models   Author: dtak   File: utils_calibration.py    MIT License
def calc_binary_clf_calibration_per_bin(
        y_true, y_prob,
        bins=10):
    """ 
    """
    if y_prob.min() < 0 or y_prob.max() > 1:
        raise ValueError("y_prob has values outside [0, 1]")

    bins = np.asarray(bins)
    if bins.ndim == 1 and bins.size > 1:
        bin_edges = bins
    else:
        bin_edges = np.linspace(0, 1, int(bins) + 1)
    if bin_edges[-1] == 1.0:
        bin_edges[-1] += 1e-8
    assert bin_edges.ndim == 1
    assert bin_edges.size > 2
    nbins = bin_edges.size - 1
    # Assign each predicted probability into one bin
    # from 0, 1, ... nbins
    binids = np.digitize(y_prob, bin_edges) - 1
    assert binids.max() <= nbins
    assert binids.min() >= 0

    count_per_bin = np.bincount(binids, minlength=nbins)
    countTP_per_bin = np.bincount(binids, minlength=nbins, weights=y_true == 1)
    countTN_per_bin = np.bincount(binids, minlength=nbins, weights=y_true == 0)

    # This divide will (and should) yield nan
    # if any bin has no content
    fracTP_per_bin = countTP_per_bin / np.asarray(count_per_bin, dtype=np.float64)

    info_per_bin = dict(
        count_per_bin=count_per_bin,
        countTP_per_bin=countTP_per_bin,
        countTN_per_bin=countTN_per_bin,
        fracTP_per_bin=fracTP_per_bin,
        xcenter_per_bin=0.5 * (bin_edges[:-1] + bin_edges[1:]),
        xwidth_per_bin=(bin_edges[1:] - bin_edges[:-1]),
        bin_edges=bin_edges,
        )
    return info_per_bin 
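
A quick usage sketch for the function above, with small synthetic arrays (values chosen purely for illustration):

import numpy as np

y_true = np.array([0, 0, 1, 1, 1])
y_prob = np.array([0.10, 0.40, 0.35, 0.80, 0.95])
info = calc_binary_clf_calibration_per_bin(y_true, y_prob, bins=5)
print(info['bin_edges'])      # np.linspace(0, 1, 6), with the last edge nudged past 1.0
print(info['count_per_bin'])  # [1 1 1 0 2]
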
Example 59
Project: FRIDA   Author: LCAV   File: bands_selection.py    MIT License 4 votes vote down vote up
def select_bands(samples, freq_range, fs, nfft, win, n_bands, div=1):
    '''
    Selects the bins with most energy in a frequency range.

    It is possible to specify a div factor. Then the range is subdivided
    into div equal subbands and n_bands / div per subband are selected.
    '''

    if win is not None and isinstance(win, bool):
        if win:
            win = np.hanning(nfft)
        else:
            win = None

    # Read the signals in a single array
    sig = [wavfile.read(s)[1] for s in samples]
    L = max([s.shape[0] for s in sig])
    signals = np.zeros((L,len(samples)), dtype=np.float32)
    for i in range(signals.shape[1]):
        signals[:sig[i].shape[0],i] = sig[i] / np.std(sig[i][sig[i] > 1e-2])

    sum_sig = np.sum(signals, axis=1)

    sum_STFT = pra.stft(sum_sig, nfft, nfft, win=win, transform=rfft).T
    sum_STFT_avg = np.mean(np.abs(sum_STFT)**2, axis=1)

    # Do some band selection
    bnds = np.linspace(freq_range[0], freq_range[1], div+1)

    freq_hz = np.zeros(n_bands)
    freq_bins = np.zeros(n_bands, dtype=int)

    nsb = n_bands // div

    for i in range(div):

        bl = int(bnds[i] / fs * nfft)
        bh = int(bnds[i+1] / fs * nfft)

        k = np.argsort(sum_STFT_avg[bl:bh])[-nsb:]

        freq_hz[nsb*i:nsb*(i+1)] = (bl + k) / nfft * fs
        freq_bins[nsb*i:nsb*(i+1)] = k + bl

    freq_hz = freq_hz[:n_bands]

    return np.unique(freq_hz), np.unique(freq_bins) 
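
The subband edges above come from a single np.linspace call over the frequency range. For example, with an assumed range of 500-4000 Hz and div=3:

import numpy as np

bnds = np.linspace(500., 4000., 3 + 1)
# [ 500.  1666.67  2833.33  4000.] (approx); subband i spans [bnds[i], bnds[i+1])
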
Example 60
Project: FRIDA   Author: LCAV   File: doa.py    MIT License 4 votes vote down vote up
def __init__(self, L, fs, nfft, c=343.0, num_src=1, mode='far', r=None, 
        theta=None, phi=None):

        self.L = L              # locations of mics
        self.fs = fs            # sampling frequency
        self.c = c              # speed of sound
        self.M = L.shape[1]     # number of microphones
        self.D = L.shape[0]     # number of dimensions (x,y,z)
        self.num_snap = None    # number of snapshots

        self.nfft = nfft
        self.max_bin = int(self.nfft/2) + 1
        self.freq_bins = None
        self.freq_hz = None
        self.num_freq = None

        self.num_src = self._check_num_src(num_src)
        self.sources = np.zeros([self.D, self.num_src])
        self.src_idx = np.zeros(self.num_src, dtype=int)
        self.phi_recon = None

        self.mode = mode
        if self.mode == 'far':
            self.r = np.ones(1)
        elif r is None:
            self.r = np.ones(1)
            self.mode = 'far'
        else:
            self.r = r
            if np.array_equal(r, np.ones(1)):
                self.mode = 'far'
        if theta is None:
            self.theta = np.linspace(-180., 180., 30) * np.pi / 180
        else:
            self.theta = theta
        if phi is None:
            self.phi = np.pi / 2 * np.ones(1)
        else:
            self.phi = phi

        # spatial spectrum / dirty image (FRI)
        self.P = None

        # build lookup table to candidate locations from r, theta, phi 
        from fri import FRI
        if not isinstance(self, FRI):
            self.loc = None
            self.num_loc = None
            self.build_lookup()
            self.mode_vec = None
            self.compute_mode()
        else:   # no grid search for FRI
            self.num_loc = len(self.theta) 
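
One detail of the default azimuth grid above: with endpoint=True (the np.linspace default), -180 and 180 degrees are both included even though they describe the same direction. A small sketch of the two variants:

import numpy as np

theta = np.linspace(-180., 180., 30) * np.pi / 180                            # as above; ends coincide mod 2*pi
theta_distinct = np.linspace(-180., 180., 30, endpoint=False) * np.pi / 180   # 30 distinct directions
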
Example 61
Project: DataHack2018   Author: InnovizTech   File: vis_utils.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def __init__(self, max_r):
        self.max_r = max_r
        format = GeomVertexFormat.getV3c4()
        vdata = GeomVertexData('point', format, Geom.UHDynamic)
        self._pos_writer = GeomVertexWriter(vdata, 'vertex')
        self._color_writer = GeomVertexWriter(vdata, 'color')

        line_num = 60
        vdata.setNumRows(line_num)

        angles = np.linspace(0, np.pi * 2 - np.pi * 2 / line_num , line_num)

        other_rgba = (0., 0., 0.3, 0.1)
        other2_rgba = (0.1, 0.1, 0.4, 0.4)
        axis_rgba = (0.2, 0.2, 0.5, 1.0)
        max_r = 250
        for indx, angle in enumerate(angles):
            if indx % 5 == 0:
                rgba = axis_rgba
            else:
                rgba = other_rgba
            self._pos_writer.addData3d(0, 0, 0.)
            self._color_writer.addData4f(rgba[0], rgba[1], rgba[2], rgba[3])
            self._pos_writer.addData3d(max_r * np.sin(angle), max_r * np.cos(angle), 0.)
            self._color_writer.addData4f(rgba[0], rgba[1], rgba[2], rgba[3])

        grnd_prmtv = GeomLines(Geom.UHStatic)
        grnd_prmtv.addConsecutiveVertices(0, 2 * line_num)
        grnd_prmtv.closePrimitive()
        ground_geom = Geom(vdata)
        ground_geom.addPrimitive(grnd_prmtv)
        snode = GeomNode('ground_lines')
        snode.addGeom(ground_geom)

        self.points_node = base.render.attachNewNode(snode)
        self.points_node.setTwoSided(True)

        for rad in range(int(max_r)):
            color = axis_rgba
            pp = makeArc(angleDegrees=360, numSteps=160, scale=rad, color=color)
            tn = TextNode('dd')
            tn.setText(str(rad))
            tn.setTextScale(0.2)
            tn.setTextColor(color)
            text_geom = GeomNode('text')
            text_geom.addChild(tn)
            tp = NodePath(text_geom)
            tp.setPos((0, rad-0.2, 0))
            tp.setHpr((0, -90, 0))
            tp.reparentTo(self.points_node)
            pp.reparentTo(self.points_node) 
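
The angles grid above stops one step short of 2*pi so the first and last spokes do not overlap; the same grid can be produced with endpoint=False. A quick check:

import numpy as np

line_num = 60
a1 = np.linspace(0, np.pi * 2 - np.pi * 2 / line_num, line_num)
a2 = np.linspace(0, np.pi * 2, line_num, endpoint=False)
print(np.allclose(a1, a2))  # True: 60 spokes spaced 2*pi/60 apart either way
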
Example 62
Project: kuaa   Author: rafaelwerneck   File: common.py    GNU General Public License v3.0 4 votes vote down vote up
def gridSearch(iterations, max_or_min, of, *params): # PEDRORMJUNIOR: 20130806: add max_or_min
    """
    Input:
    iterations = number of refinement iterations
    max_or_min = either 'max' or 'min'
    of = objective function.  A function that receives 'n' parameters and returns an accuracy measure.
    *params = lists of parameters.  The first list contains values to be grid searched as the first parameter of the 'of'.  The second list contains values to be grid searched as the second parameter of the 'of'.  And so on...

    Output:
    ret = a list of parameters of size 'n'.  The best combination of parameters according to the 'of'.
    """
    yellow_err('gridSearch(): iterations = {0}'.format(iterations))

    assert max_or_min in ['min', 'max']

    lenranges = list(map(len, params))
    assert all(x > 1 for x in lenranges)
    positions = [int(np.prod(lenranges[:i]))
                 for i in range(len(lenranges))]

    meshparams = mymeshgrid(*params)

    results = []
    for i in range(len(meshparams)):
        res = (of(*meshparams[i]),
               (-i if max_or_min == 'min' else i),
               i)
        results.append(res)
    _, _, best_params_idx = max(results)
    yellow_err('gridSearch(): best_params = {0}'.format(meshparams[best_params_idx]))

    if iterations == 0:
        ret = meshparams[best_params_idx]
    else:
        shifts = [abs(meshparams[0][i] - meshparams[positions[i]][i]) / 2.0
                  for i in range(len(lenranges))]

        newparams = [np.linspace(meshparams[best_params_idx][i] - shifts[i],
                                 meshparams[best_params_idx][i] + shifts[i],
                                 num=lenranges[i]).tolist()
                     for i in range(len(lenranges))]

        ret = gridSearch(iterations - 1, max_or_min, of, *newparams)

    return ret 
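
The recursion above re-centers a fresh np.linspace grid on the best parameter found and halves the search radius each iteration. A minimal sketch of one refinement step (numbers assumed):

import numpy as np

best, shift = 0.3, 0.1
refined = np.linspace(best - shift, best + shift, num=5)
print(refined)  # [0.2  0.25 0.3  0.35 0.4 ]
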
Example 63
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def linear_extrapolation_plot(log_prob_adv_array, y, file_name,
                              min_epsilon=-10, max_epsilon=10,
                              num_points=21):
    """Generate linear extrapolation plot.

    Args:
        log_prob_adv_array: Numpy array containing log probabilities
        y: Tf placeholder for the labels
        file_name: Plot filename
        min_epsilon: Minimum value of epsilon over the interval
        max_epsilon: Maximum value of epsilon over the interval
        num_points: Number of points used to interpolate
    """
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    figure = plt.figure()
    figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')

    correct_idx = np.argmax(y, axis=0)
    fig = plt.figure()
    plt.xlabel('Epsilon')
    plt.ylabel('Logits')
    x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
    plt.xlim(min_epsilon - 1, max_epsilon + 1)
    for i in range(y.shape[0]):
        if i == correct_idx:
            ls = '-'
            linewidth = 5
        else:
            ls = '--'
            linewidth = 2
        plt.plot(
            x_axis,
            log_prob_adv_array[:, i],
            ls=ls,
            linewidth=linewidth,
            label='{}'.format(i))
    plt.legend(loc='best', fontsize=14)
    plt.show()
    fig.savefig(file_name)
    plt.clf()
    return figure 
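
With the defaults above (min_epsilon=-10, max_epsilon=10, num_points=21), the x-axis lands exactly on integer epsilon values:

import numpy as np

x_axis = np.linspace(-10, 10, 21)
# step (10 - (-10)) / (21 - 1) = 1.0, so [-10., -9., ..., 10.]
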
Example 64
Project: programsynthesishunting   Author: flexgp   File: save_plots.py    GNU General Public License v3.0 4 votes vote down vote up
def save_pareto_fitness_plot():
    """
    Saves a plot of the current fitness for a pareto front.

    :return: Nothing
    """

    from algorithm.parameters import params

    # Initialise figure instance.
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)

    # Set up iterator for color plotting.
    color = iter(plt.cm.jet(np.linspace(0, 1, len(first_pareto_list))))

    # Get labels for individual fitnesses.
    ffs = params['FITNESS_FUNCTION'].fitness_functions

    # Find the direction for step lines to "bend"
    step_dir = 'pre' if ffs[0].maximise else 'post'

    # Plot data.
    for i, gen in enumerate(first_pareto_list):
        c = next(color)
        ax1.step(gen[0], gen[1], linestyle='--',
                 where=step_dir, color=c, lw=0.35, alpha=0.25)
        ax1.plot(gen[0], gen[1], 'o', color=c, ms=1)

    # Set labels with class names.
    ax1.set_xlabel(ffs[0].__class__.__name__, fontsize=14)
    ax1.set_ylabel(ffs[1].__class__.__name__, fontsize=14)

    # Plot title and legend.
    plt.title("First pareto fronts by generation")

    # Set up colorbar instead of legend. Normalise axis to scale of data.
    sm = plt.cm.ScalarMappable(cmap="jet",
                   norm=plt.Normalize(vmin=0, vmax=len(first_pareto_list) - 1))

    # Fake up the array of the scalar mappable.
    sm._A = []

    # Plot the colorbar.
    cbar = plt.colorbar(sm, ticks=[0, len(first_pareto_list) - 1])

    # Set label of colorbar.
    # cbar.ax.get_yaxis().labelpad = 15
    cbar.ax.set_ylabel('Generation', rotation=90)

    # Save plot and close.
    plt.savefig(path.join(params['FILE_PATH'], "fitness.pdf"))
    plt.close() 
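
Sampling a colormap with np.linspace, as done above, yields one RGBA row per generation. A standalone sketch assuming 5 generations:

import numpy as np
import matplotlib.pyplot as plt

colors = iter(plt.cm.jet(np.linspace(0, 1, 5)))  # (5, 4) RGBA array spanning the colormap
first = next(colors)
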
Example 65
Project: neural-pipeline   Author: toodef   File: tensorboard.py    MIT License 4 votes vote down vote up
def _update_metrics(self, metrics: [AbstractMetric], metrics_groups: [MetricsGroup]) -> None:
        """
        Update console

        :param metrics: metrics
        """

        def process_metric(cur_metric, parent_tag: str = None):
            def add_histogram(name: str, vals, step_num, bins):
                try:
                    self.__writer.add_histogram(name, vals, step_num, bins)
                except Exception:
                    pass

            tag = lambda name: name if parent_tag is None else '{}/{}'.format(parent_tag, name)

            if isinstance(cur_metric, MetricsGroup):
                for m in cur_metric.metrics():
                    if m.get_values().size > 0:
                        self.__writer.add_scalars(tag(m.name()), {m.name(): np.mean(m.get_values())}, global_step=self.epoch_num)
                        add_histogram(tag(m.name()) + '_hist',
                                      np.clip(m.get_values(), m.min_val(), m.max_val()).astype(np.float32),
                                      self.epoch_num, np.linspace(m.min_val(), m.max_val(), num=11).astype(np.float32))
            else:
                values = cur_metric.get_values().astype(np.float32)
                if values.size > 0:
                    self.__writer.add_scalar(tag(cur_metric.name()), float(np.mean(values)), global_step=self.epoch_num)
                    add_histogram(tag(cur_metric.name()) + '_hist',
                                  np.clip(values, cur_metric.min_val(), cur_metric.max_val()).astype(np.float32),
                                  self.epoch_num, np.linspace(cur_metric.min_val(), cur_metric.max_val(), num=11).astype(np.float32))

        if self.__writer is None:
            return

        for metric in metrics:
            process_metric(metric)

        for metrics_group in metrics_groups:
            for metric in metrics_group.metrics():
                process_metric(metric, metrics_group.name())
            for group in metrics_group.groups():
                process_metric(group, metrics_group.name()) 
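
The histogram bins above are np.linspace(min_val, max_val, num=11): 11 edges defining 10 equal-width bins. In isolation, assuming a [0, 1] metric range:

import numpy as np

bins = np.linspace(0.0, 1.0, num=11).astype(np.float32)
# [0.  0.1 0.2 ... 1. ] -- 11 edges, 10 equal-width bins
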
Example 66
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: keypoints.py    Apache License 2.0 4 votes vote down vote up
def plot_keypoints(img, coords, confidence, class_ids, bboxes, scores,
                   box_thresh=0.5, keypoint_thresh=0.2, **kwargs):
    """Visualize keypoints.

    Parameters
    ----------
    img : numpy.ndarray or mxnet.nd.NDArray
        Image with shape `H, W, 3`.
    coords : numpy.ndarray or mxnet.nd.NDArray
        Array with shape `Batch, N_Joints, 2`.
    confidence : numpy.ndarray or mxnet.nd.NDArray
        Array with shape `Batch, N_Joints, 1`.
    class_ids : numpy.ndarray or mxnet.nd.NDArray
        Class IDs.
    bboxes : numpy.ndarray or mxnet.nd.NDArray
        Bounding boxes with shape `N, 4`. Where `N` is the number of boxes.
    scores : numpy.ndarray or mxnet.nd.NDArray, optional
        Confidence scores of the provided `bboxes` with shape `N`.
    box_thresh : float, optional, default 0.5
        Display threshold if `scores` is provided. Scores with less than `box_thresh`
        will be ignored in display.
    keypoint_thresh : float, optional, default 0.2
        Keypoints with confidence less than `keypoint_thresh` will be ignored in display.

    Returns
    -------
    matplotlib axes
        The plotted axes.

    """
    if isinstance(coords, mx.nd.NDArray):
        coords = coords.asnumpy()
    if isinstance(class_ids, mx.nd.NDArray):
        class_ids = class_ids.asnumpy()
    if isinstance(bboxes, mx.nd.NDArray):
        bboxes = bboxes.asnumpy()
    if isinstance(scores, mx.nd.NDArray):
        scores = scores.asnumpy()
    if isinstance(confidence, mx.nd.NDArray):
        confidence = confidence.asnumpy()

    joint_visible = confidence[:, :, 0] > keypoint_thresh
    joint_pairs = [[0, 1], [1, 3], [0, 2], [2, 4],
                   [5, 6], [5, 7], [7, 9], [6, 8], [8, 10],
                   [5, 11], [6, 12], [11, 12],
                   [11, 13], [12, 14], [13, 15], [14, 16]]

    person_ind = class_ids[0] == 0
    ax = plot_bbox(img, bboxes[0][person_ind[:, 0]],
                   scores[0][person_ind[:, 0]], thresh=box_thresh, **kwargs)

    colormap_index = np.linspace(0, 1, len(joint_pairs))
    for i in range(coords.shape[0]):
        pts = coords[i]
        for cm_ind, jp in zip(colormap_index, joint_pairs):
            if joint_visible[i, jp[0]] and joint_visible[i, jp[1]]:
                ax.plot(pts[jp, 0], pts[jp, 1],
                        linewidth=3.0, alpha=0.7, color=plt.cm.cool(cm_ind))
                ax.scatter(pts[jp, 0], pts[jp, 1], s=20)
    return ax 
Example 67
Project: numpynet   Author: uptake   File: examples.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def make_checkerboard_training_set(
    num_points=0, noise=0.0, randomize=True, x_min=0.0, x_max=1.0, y_min=0.0, y_max=1.0
):
    """
    Makes a binary array like a checkerboard (to work on an xor like problem)
    :param num_points: (int) The number of points you want in your training set
    :param noise: (float) percent to bit-flip in the training data, allows it to be imperfect
    :param randomize: (bool) True if you want the locations to be random, False if you want an ordered grid
    :param x_min: (float) minimum x of the 2D domain
    :param x_max: (float) maximum x of the 2D domain
    :param y_min: (float) minimum y of the 2D domain
    :param y_max: (float) maximum y of the 2D domain
    :return:
    """
    log.out.info("Generating target data.")
    # Select coordinates to do an XOR like operation on
    coords = []
    bools = []
    if randomize:
        for i in range(num_points):
            # Add num_points randomly
            coord_point = np.random.random(2)
            coord_point[0] = coord_point[0] * (x_max - x_min) + x_min
            coord_point[1] = coord_point[1] * (y_max - y_min) + y_min
            coords.append(coord_point)
    else:
        x_points = np.linspace(x_min, x_max, int(np.sqrt(num_points)))
        y_points = np.linspace(y_min, y_max, int(np.sqrt(num_points)))
        for i in range(int(np.sqrt(num_points))):
            for j in range(int(np.sqrt(num_points))):
                # Add num_points randomly
                coord_point = [x_points[i], y_points[j]]
                coords.append(coord_point)
    # Assign an xor boolean value to the coordinates
    for coord_point in coords:
        bool_point = np.array(
            [np.round(coord_point[0]) % 2, np.round(coord_point[1]) % 2]
        ).astype(bool)
        bools.append(np.logical_xor(bool_point[0], bool_point[1]))
    # If noisy then bit flip
    if noise > 0.0:
        for i in range(len(bools)):
            if np.random.random() < noise:
                bools[i] = np.logical_not(bools[i])
    # Build training vectors
    train_in = None
    train_out = None
    for i, coord in enumerate(coords):
        # Need to initialize the arrays
        if i == 0:
            train_in = np.array([coord])
            train_out = np.array([[bools[i]]])
        else:
            train_in = np.append(train_in, np.array([coord]), axis=0)
            train_out = np.append(train_out, np.array([[bools[i]]]), axis=1)

    train_out = train_out.T
    return train_in, train_out 
Example 68
Project: DOTA_models   Author: ringringyi   File: build_image_data.py    Apache License 2.0 4 votes vote down vote up
def _process_image_files(name, filenames, texts, labels, num_shards):
  """Process and save list of images as TFRecord of Example protos.

  Args:
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    texts: list of strings; each string is human readable, e.g. 'dog'
    labels: list of integer; each integer identifies the ground truth
    num_shards: integer number of shards for this data set.
  """
  assert len(filenames) == len(texts)
  assert len(filenames) == len(labels)

  # Break all images into batches with a [ranges[i][0], ranges[i][1]].
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
  ranges = []
  for i in range(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i + 1]])

  # Launch a thread for each batch.
  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
  sys.stdout.flush()

  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()

  # Create a generic TensorFlow-based utility for converting all image codings.
  coder = ImageCoder()

  threads = []
  for thread_index in range(len(ranges)):
    args = (coder, thread_index, ranges, name, filenames,
            texts, labels, num_shards)
    t = threading.Thread(target=_process_image_files_batch, args=args)
    t.start()
    threads.append(t)

  # Wait for all the threads to terminate.
  coord.join(threads)
  print('%s: Finished writing all %d images in data set.' %
        (datetime.now(), len(filenames)))
  sys.stdout.flush() 
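
The sharding pattern above uses np.linspace(...).astype(int) to split a list into nearly equal contiguous batches. A minimal sketch with 103 files and 4 threads (numbers assumed):

import numpy as np

spacing = np.linspace(0, 103, 4 + 1).astype(int)   # [  0  25  51  77 103]
ranges = [[spacing[i], spacing[i + 1]] for i in range(len(spacing) - 1)]
# batch i is filenames[ranges[i][0]:ranges[i][1]]; sizes 25, 26, 26, 26
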
Example 69
Project: DOTA_models   Author: ringringyi   File: build_imagenet_data.py    Apache License 2.0 4 votes vote down vote up
def _process_image_files(name, filenames, synsets, labels, humans,
                         bboxes, num_shards):
  """Process and save list of images as TFRecord of Example protos.

  Args:
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    synsets: list of strings; each string is a unique WordNet ID
    labels: list of integer; each integer identifies the ground truth
    humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain from 0+ entries corresponding to the number of bounding
      box annotations for the image.
    num_shards: integer number of shards for this data set.
  """
  assert len(filenames) == len(synsets)
  assert len(filenames) == len(labels)
  assert len(filenames) == len(humans)
  assert len(filenames) == len(bboxes)

  # Break all images into batches with a [ranges[i][0], ranges[i][1]].
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
  ranges = []
  threads = []
  for i in range(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i + 1]])

  # Launch a thread for each batch.
  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
  sys.stdout.flush()

  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()

  # Create a generic TensorFlow-based utility for converting all image codings.
  coder = ImageCoder()

  threads = []
  for thread_index in range(len(ranges)):
    args = (coder, thread_index, ranges, name, filenames,
            synsets, labels, humans, bboxes, num_shards)
    t = threading.Thread(target=_process_image_files_batch, args=args)
    t.start()
    threads.append(t)

  # Wait for all the threads to terminate.
  coord.join(threads)
  print('%s: Finished writing all %d images in data set.' %
        (datetime.now(), len(filenames)))
  sys.stdout.flush() 
Example 70
Project: DOTA_models   Author: ringringyi   File: build_mscoco_data.py    Apache License 2.0 4 votes vote down vote up
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
                         num_shards):
  """Processes and saves a subset of images as TFRecord files in one thread.

  Args:
    thread_index: Integer thread identifier within [0, len(ranges)].
    ranges: A list of pairs of integers specifying the ranges of the dataset to
      process in parallel.
    name: Unique identifier specifying the dataset.
    images: List of ImageMetadata.
    decoder: An ImageDecoder object.
    vocab: A Vocabulary object.
    num_shards: Integer number of shards for the output files.
  """
  # Each thread produces N shards where N = num_shards / num_threads. For
  # instance, if num_shards = 128, and num_threads = 2, then the first thread
  # would produce shards [0, 64).
  num_threads = len(ranges)
  assert not num_shards % num_threads
  num_shards_per_batch = int(num_shards / num_threads)

  shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                             num_shards_per_batch + 1).astype(int)
  num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

  counter = 0
  for s in range(num_shards_per_batch):
    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
    shard = thread_index * num_shards_per_batch + s
    output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
    output_file = os.path.join(FLAGS.output_dir, output_filename)
    writer = tf.python_io.TFRecordWriter(output_file)

    shard_counter = 0
    images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
    for i in images_in_shard:
      image = images[i]

      sequence_example = _to_sequence_example(image, decoder, vocab)
      if sequence_example is not None:
        writer.write(sequence_example.SerializeToString())
        shard_counter += 1
        counter += 1

      if not counter % 1000:
        print("%s [thread %d]: Processed %d of %d items in thread batch." %
              (datetime.now(), thread_index, counter, num_images_in_thread))
        sys.stdout.flush()

    writer.close()
    print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
          (datetime.now(), thread_index, shard_counter, output_file))
    sys.stdout.flush()
    shard_counter = 0
  print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
        (datetime.now(), thread_index, counter, num_shards_per_batch))
  sys.stdout.flush() 
Example 71
Project: DOTA_models   Author: ringringyi   File: build_mscoco_data.py    Apache License 2.0 4 votes vote down vote up
def _process_dataset(name, images, vocab, num_shards):
  """Processes a complete data set and saves it as a TFRecord.

  Args:
    name: Unique identifier specifying the dataset.
    images: List of ImageMetadata.
    vocab: A Vocabulary object.
    num_shards: Integer number of shards for the output files.
  """
  # Break up each image into a separate entity for each caption.
  images = [ImageMetadata(image.image_id, image.filename, [caption])
            for image in images for caption in image.captions]

  # Shuffle the ordering of images. Make the randomization repeatable.
  random.seed(12345)
  random.shuffle(images)

  # Break the images into num_threads batches. Batch i is defined as
  # images[ranges[i][0]:ranges[i][1]].
  num_threads = min(num_shards, FLAGS.num_threads)
  spacing = np.linspace(0, len(images), num_threads + 1).astype(int)
  ranges = []
  threads = []
  for i in range(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i + 1]])

  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()

  # Create a utility for decoding JPEG images to run sanity checks.
  decoder = ImageDecoder()

  # Launch a thread for each batch.
  print("Launching %d threads for spacings: %s" % (num_threads, ranges))
  for thread_index in range(len(ranges)):
    args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
    t = threading.Thread(target=_process_image_files, args=args)
    t.start()
    threads.append(t)

  # Wait for all the threads to terminate.
  coord.join(threads)
  print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
        (datetime.now(), len(images), name)) 
Example 72
Project: pypriv   Author: soeaver   File: vis_mask_rcnn.py    MIT License 4 votes vote down vote up
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    """Visualizes keypoints (adapted from vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
    """
    dataset_keypoints, _ = get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)

    # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)

    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (
        kps[:2, dataset_keypoints.index('right_shoulder')] +
        kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (
        kps[:2, dataset_keypoints.index('right_hip')] +
        kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(
        kps[2, dataset_keypoints.index('right_hip')],
        kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
            color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(mid_hip),
            color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)

    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = kps[0, i1], kps[1, i1]
        p2 = kps[0, i2], kps[1, i2]
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(
                kp_mask, p1, p2,
                color=colors[l], thickness=2, lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(
                kp_mask, p1,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(
                kp_mask, p2,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)

    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) 
Example 73
Project: graph-neural-networks   Author: alelab-upenn   File: graphTools.py    GNU General Public License v3.0 4 votes vote down vote up
def splineBasis(K, x, degree=3):
    # Function written by M. Defferrard, taken verbatim (except for function
    # name), from 
    # https://github.com/mdeff/cnn_graph/blob/master/lib/models.py#L662
    """
    Return the B-spline basis.
    K: number of control points.
    x: evaluation points
       or number of evenly distributed evaluation points.
    degree: degree of the spline. Cubic spline by default.
    """
    if np.isscalar(x):
        x = np.linspace(0, 1, x)

    # Evenly distributed knot vectors.
    kv1 = x.min() * np.ones(degree)
    kv2 = np.linspace(x.min(), x.max(), K-degree+1)
    kv3 = x.max() * np.ones(degree)
    kv = np.concatenate((kv1, kv2, kv3))

    # Cox - DeBoor recursive function to compute one spline over x.
    def cox_deboor(k, d):
        # Test for end conditions, the rectangular degree zero spline.
        if (d == 0):
            return ((x - kv[k] >= 0) & (x - kv[k + 1] < 0)).astype(int)

        denom1 = kv[k + d] - kv[k]
        term1 = 0
        if denom1 > 0:
            term1 = ((x - kv[k]) / denom1) * cox_deboor(k, d - 1)

        denom2 = kv[k + d + 1] - kv[k + 1]
        term2 = 0
        if denom2 > 0:
            term2 = ((-(x - kv[k + d + 1]) / denom2) * cox_deboor(k + 1, d - 1))

        return term1 + term2

    # Compute basis for each point
    basis = np.column_stack([cox_deboor(k, degree) for k in range(K)])
    basis[-1,-1] = 1
    return basis 
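
The clamped knot vector above is two constant runs around one np.linspace segment. A small sketch with K=6 control points and a cubic (degree=3) spline:

import numpy as np

K, degree = 6, 3
x = np.linspace(0, 1, 10)   # 10 evenly spaced evaluation points
kv = np.concatenate((x.min() * np.ones(degree),
                     np.linspace(x.min(), x.max(), K - degree + 1),
                     x.max() * np.ones(degree)))
print(kv)  # [0. 0. 0. 0. 0.333 0.667 1. 1. 1. 1.] (approx)
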
Example 74
Project: gullikson-scripts   Author: kgullikson88   File: Broaden.py    MIT License 4 votes vote down vote up
def MacroBroad(data, vmacro, extend=True):
    """
      This broadens the data by a given macroturbulent velocity.
    It works for small wavelength ranges. I need to make a better
    version that is accurate for large wavelength ranges! Sorry
    for the terrible variable names, it was copied from
    convol.pro in AnalyseBstar (Karolien Lefever)

    Parameters:
    ===========
    -data:     kglib.utils.DataStructures.xypoint instance
               Stores the data to be broadened. The data MUST
               be equally-spaced before calling this!

    -vmacro:   float
               The macroturbulent velocity, in km/s

    -extend:   boolean
               If true, the y-axis will be extended to avoid edge-effects

    Returns:
    ========
    A broadened version of data.
    """
    # Make the kernel
    c = constants.c.cgs.value * units.cm.to(units.km)
    sq_pi = np.sqrt(np.pi)
    lambda0 = np.median(data.x)
    xspacing = data.x[1] - data.x[0]
    mr = vmacro * lambda0 / c
    ccr = 2 / (sq_pi * mr)

    px = np.arange(-data.size() / 2, data.size() / 2 + 1) * xspacing
    pxmr = abs(px) / mr
    profile = ccr * (np.exp(-pxmr ** 2) + sq_pi * pxmr * (erf(pxmr) - 1.0))

    # Extend the xy axes to avoid edge-effects, if desired
    if extend:

        before = data.y[-profile.size // 2 + 1:]
        after = data.y[:profile.size // 2]
        extended = np.r_[before, data.y, after]

        first = data.x[0] - float(int(profile.size / 2.0 + 0.5)) * xspacing
        last = data.x[-1] + float(int(profile.size / 2.0 + 0.5)) * xspacing
        x2 = np.linspace(first, last, extended.size)

        conv_mode = "valid"

    else:
        extended = data.y.copy()
        x2 = data.x.copy()
        conv_mode = "same"

    # Do the convolution
    newdata = data.copy()
    newdata.y = fftconvolve(extended, profile / profile.sum(), mode=conv_mode)

    return newdata 
Example 75
Project: gullikson-scripts   Author: kgullikson88   File: HelperFunctions.py    MIT License 4 votes vote down vote up
def IterativeLowPass(data, vel, numiter=100, lowreject=3, highreject=3, width=5, linearize=False):
    """
    An iterative version of LowPassFilter.
    It will ignore outliers in the low pass filter. New parameters that are
    not described in the docstring for LowPassFilter are:

    Parameters:
    ===========
    - numiter:       integer
                     The maximum number of iterations to take

    - lowreject:     integer
                     How many sigma below the current filtered curve do we
                     count as bad and ignore in the next iteration?

    - highreject:    integer
                     How many sigma above the current filtered curve do we
                     count as bad and ignore in the next iteration?
    """

    datacopy = data.copy()
    if linearize:
        datafcn = spline(datacopy.x, datacopy.y, k=3)
        errorfcn = spline(datacopy.x, datacopy.err, k=1)
        contfcn = spline(datacopy.x, datacopy.cont, k=1)
        linear = DataStructures.xypoint(datacopy.x.size)
        linear.x = np.linspace(datacopy.x[0], datacopy.x[-1], linear.size())
        linear.y = datafcn(linear.x)
        linear.err = errorfcn(linear.x)
        linear.cont = contfcn(linear.x)
        datacopy = linear.copy()

    done = False
    iter = 0
    datacopy.cont = FittingUtilities.Continuum(datacopy.x, datacopy.y, fitorder=9, lowreject=2.5, highreject=5)
    while not done and iter < numiter:
        done = True
        iter += 1
        smoothed = LowPassFilter(datacopy, vel, width=width)
        residuals = datacopy.y / smoothed
        mean = np.mean(residuals)
        std = np.std(residuals)
        badpoints = np.where(np.logical_or((residuals - mean) < -lowreject * std, residuals - mean > highreject * std))[
            0]
        if badpoints.size > 0:
            done = False
            datacopy.y[badpoints] = smoothed[badpoints]
    if linearize:
        return linear.x, smoothed
    else:
        return smoothed 
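
The linearize branch above builds an evenly spaced abscissa with np.linspace spanning the original (possibly uneven) grid, then evaluates the splines on it. In isolation, with an assumed uneven grid:

import numpy as np

x = np.array([1.0, 1.1, 1.25, 1.5, 2.0])      # unevenly spaced wavelengths
linear_x = np.linspace(x[0], x[-1], x.size)   # same span and point count, uniform spacing
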
Example 76
Project: DiscEvolution   Author: rbooth200   File: reconstruction.py    GNU General Public License v3.0 4 votes vote down vote up
def _test_scheme(Npts, IC, reconstruction, tout, a, Ca = 0.9):
    '''Test schemes using an Explicit 3rd Order TVD RK integration:

    U*   = Un + dt L(Un)
    U**  = 3/4 Un + (1/4)(U*  + dt L(U*))
    Un+1 = 1/3 Un + (2/3)(U** + dt L(U**))

    with the Courant-limited dt = Ca * dx / a.

    Here L(U) = -d(aU)/dx, i.e. linear advection with speed a.
    '''
    # Setup up the grid
    stencil = reconstruction.STENCIL
    
    shape = Npts + 1 + 2*stencil
    dx = 1. / Npts
    xe = np.linspace(-dx*stencil, 1 + dx*stencil, shape)

    # Setup the velocity function
    v = a * np.ones_like(xe)

    # Reconstruction function:
    R = reconstruction(xe, 0)
    
    def boundary(Q):
        Q[ :stencil] = Q[Npts:Npts+stencil]
        Q[-stencil:] = Q[stencil:2*stencil]
    

    def update_stage(Q, v, dt):
        Qs = np.empty(Npts + 2*stencil)
        Qs[stencil:-stencil] = Q

        boundary(Qs)
        Qs = R(v[1:-1], Qs)

        return Q - dt * np.diff(Qs*v[stencil:-stencil]) / dx


    # Set the initial conditions
    Q = IC(xe[stencil:-stencil])

    t = 0
    dtmax = Ca * dx / abs(a)
    while t < tout:
        dt = min(dtmax, tout-t)
        Qs =          update_stage(Q , v, dt)
        Qs = (3*Q +   update_stage(Qs, v, dt))/4
        Q  = (  Q + 2*update_stage(Qs, v, dt))/3

        t = min(tout, t+dt)

    xe = xe[stencil:-stencil]
    xc = 0.5*(xe[1:] + xe[:-1])
    return xc, Q 
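
The grid above extends np.linspace beyond [0, 1] by stencil ghost cells per side while keeping the spacing exactly dx. A standalone check (Npts=8, stencil=2 assumed):

import numpy as np

Npts, stencil = 8, 2
dx = 1. / Npts
xe = np.linspace(-dx * stencil, 1 + dx * stencil, Npts + 1 + 2 * stencil)
print(np.allclose(np.diff(xe), dx))  # True: uniform spacing, ghost edges included
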
Example 77
Project: nxt-sketcher   Author: simondolle   File: printer.py    MIT License 4 votes vote down vote up
def display_reachable_area(points_per_lego_unit, angle, structure_settings, plot_errors, plot_actual_points):
    reachable_xs = []
    reachable_ys = []
    grid_to_angle = compute_grid_to_angle_inverse_kinematics(structure_settings, points_per_lego_unit, angle)
    for (x, y), (alpha, beta, _) in grid_to_angle.items():
        if plot_actual_points:
            x, y = get_xy(1./structure_settings.gear_ratio * alpha * degrees_to_radians, 1./structure_settings.gear_ratio * beta * degrees_to_radians, structure_settings)
        x, y = change_referential(x, y, angle)
        reachable_xs.append(x)
        reachable_ys.append(y)

    print_area = find_largest_rectange_quadratic(grid_to_angle, points_per_lego_unit)
    x0, y0, x1, y1 = print_area

    width = x1 - x0
    height = y1 - y0

    xt0, yt0 = change_referential(x0, y0, angle)
    xt1, yt1 = change_referential(x0, y1, angle)
    xt2, yt2 = change_referential(x1, y1, angle)
    xt3, yt3 = change_referential(x1, y0, angle)

    margin = 1

    min_xs = min(reachable_xs) - margin
    max_xs = max(reachable_xs) + margin

    min_ys = min(reachable_ys) - margin
    max_ys = max(reachable_ys) + margin

    if plot_errors:
        xi = np.linspace(min_xs, max_xs, 100)
        yi = np.linspace(min_ys, max_ys, 100)
        X, Y = np.meshgrid(xi, yi)
        errors = np.vectorize(compute_error)(X, Y)

        CS = plt.contourf(X, Y, errors, 15, cmap=plt.cm.rainbow, vmax=abs(errors).max(), vmin=0)
        plt.colorbar(CS)

    plt.scatter(reachable_xs, reachable_ys, marker='o', c='b', s=5)
    plt.plot([xt0, xt1, xt2, xt3, xt0], [yt0, yt1, yt2, yt3, yt0])


    plt.suptitle('width=%s height=%s'%(width, height), fontsize=14)
    plt.axis('equal')
    plt.show() 
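
The error-contour grid above is a standard pattern: two np.linspace axes combined with np.meshgrid. A minimal sketch (bounds assumed):

import numpy as np

xi = np.linspace(0., 10., 100)
yi = np.linspace(0., 5., 100)
X, Y = np.meshgrid(xi, yi)   # both (100, 100); (X[i, j], Y[i, j]) tiles the rectangle
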
Example 78
Project: Cheapest-Flights-bot   Author: PhoenixDD   File: Flight Analysis.py    MIT License 4 votes vote down vote up
def task_4_dbscan(flight_data):
    flight_data['day'] = flight_data['Date_of_Flight'].dt.day
    X = StandardScaler().fit_transform(flight_data[['day', 'Price']])
    db = DBSCAN(eps=.404, min_samples=3).fit(X)
    labels = db.labels_
    clusters = len(set(labels))
    unique_labels = set(labels)
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
    plt.subplots(figsize=(12, 8))
    for k, c in zip(unique_labels, colors):
        class_member_mask = (labels == k)
        xy = X[class_member_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=c,
                 markeredgecolor='k', markersize=14)

    plt.title("Total Clusters: {}".format(clusters), fontsize=14, y=1.01)
    flight_data['dbscan_labels'] = db.labels_
    unique_labels=[]
    for index,rows in flight_data.groupby('dbscan_labels').count().iterrows():
        if rows['Price']>=5:
            unique_labels.append(index)
    list_of_5_same_label=[]
    for label in unique_labels:
        count=0
        for index,rows in flight_data.iterrows():
            if rows['dbscan_labels']!=label:
                count = 0
                continue
            count+=1
            if count==5:
                list_of_5_same_label.append(rows['Date_of_Flight'])
                count=0
    for i in list(list_of_5_same_label):  # iterate over a copy; items may be removed below
        flag=1
        for index,rows in flight_data.loc[flight_data['Date_of_Flight'].isin([i,i-timedelta(days=1),i-timedelta(days=2),i-timedelta(days=3),i-timedelta(days=4)])].iterrows():
            for ind, row in flight_data.loc[flight_data['Date_of_Flight'].isin([i, i - timedelta(days=1), i - timedelta(days=2), i - timedelta(days=3), i - timedelta(days=4)])].iterrows():
                if math.fabs(rows['Price']-row['Price'])>20:
                    flag=0
                    list_of_5_same_label.remove(i)
                    break
            if flag==0:
                break
    min=float('inf')
    for i in list_of_5_same_label:
        temp=flight_data.loc[flight_data['Date_of_Flight'].isin([i, i - timedelta(days=1), i - timedelta(days=2), i - timedelta(days=3), i - timedelta(days=4)])]
        if temp[['Price']].mean()[0].item()<min:
            min=temp[['Price']].mean().item()
            date=i
    clean_data=[]
    for index,rows in flight_data.loc[flight_data['Date_of_Flight'].isin([date, date - timedelta(days=1), date - timedelta(days=2), date - timedelta(days=3), date - timedelta(days=4)])].iterrows():
        clean_data.append([rows['Price'], rows['Date_of_Flight']])
    return pd.DataFrame(clean_data,columns=['Price','Date_of_Flight']) 
Example 79
Project: Cheapest-Flights-bot   Author: PhoenixDD   File: Flight Analysis.py    MIT License 4 votes vote down vote up
def task_3_dbscan(flight_data):
    flight_data['day'] = flight_data['Date_of_Flight'].dt.day
    X = StandardScaler().fit_transform(flight_data[['day','Price']])
    db = DBSCAN(eps=.404, min_samples=3).fit(X)
    labels = db.labels_
    clusters = len(set(labels))
    unique_labels = set(labels)
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
    plt.subplots(figsize=(12, 8))
    for k, c in zip(unique_labels, colors):
        class_member_mask = (labels == k)
        xy = X[class_member_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=c,
                 markeredgecolor='k', markersize=14)

    plt.title("Total Clusters: {}".format(clusters), fontsize=14, y=1.01)
    flight_data['dbscan_labels'] = db.labels_
    plt.matplotlib.pyplot.savefig('task_3_dbscan.png')
    plt.clf()
    outliers=flight_data.loc[flight_data['dbscan_labels']==-1]
    clean_data = []
    for Index,Row in outliers.iterrows():
        minimum = float('inf')
        for label in unique_labels:
            if label==-1:
                continue
            t=flight_data.loc[flight_data['dbscan_labels']==label]
            t=t[['Price','day']].to_numpy()
            t=t.mean(axis=0)
            if math.sqrt((Row['Price']-t[0])**2+(Row['day']-t[1])**2)<minimum:
                minimum=math.sqrt((Row['Price']-t[0])**2+(Row['day']-t[1])**2)
                closest_cluster=label
                closest_cluster_price=t[0]
        if(closest_cluster_price>Row['Price']):
            min = float('inf')
            counter=0
            sum=0
            for index,row in flight_data.iterrows():
                if row['dbscan_labels'] == closest_cluster:
                    sum=sum+((row['Price']-closest_cluster_price)**2)
                    counter+=1
            if Row['Price']<=max(closest_cluster_price-2*math.sqrt(sum/counter),50):
                clean_data.append([Row['Price'],Row['Date_of_Flight']])
    return pd.DataFrame(clean_data, columns=['Price', 'Date_of_Flight']) 
Example 80
Project: ros_dmp   Author: abhishek098   File: dmp.py    Apache License 2.0 4 votes vote down vote up
def imitate_path(self, y_des):
        """Takes in a desired trajectory and generates the set of
        system parameters that best realize this path.

        y_des list/array: the desired trajectories of each DMP
                          should be shaped [n_dmps, run_time]
        """

        # set initial state and goal
        if y_des.ndim == 1:
            y_des = y_des.reshape(1, len(y_des))
        self.y0 = y_des[:, 0].copy()
        self.y_des = y_des.copy()
        self.goal = self.gen_goal(y_des)

        self.check_offset()

        # generate function to interpolate the desired trajectory
        import scipy.interpolate
        path = np.zeros((self.n_dmps, self.timesteps))
        x = np.linspace(0, self.cs.run_time, y_des.shape[1])
        for d in range(self.n_dmps):
            path_gen = scipy.interpolate.interp1d(x, y_des[d])
            for t in range(self.timesteps):
                path[d, t] = path_gen(t * self.dt)
        y_des = path

        # calculate velocity of y_des
        dy_des = np.diff(y_des) / self.dt
        # add zero to the beginning of every row
        dy_des = np.hstack((np.zeros((self.n_dmps, 1)), dy_des))

        # calculate acceleration of y_des
        ddy_des = np.diff(dy_des) / self.dt
        # add zero to the beginning of every row
        ddy_des = np.hstack((np.zeros((self.n_dmps, 1)), ddy_des))

        f_target = np.zeros((y_des.shape[1], self.n_dmps))
        # find the force required to move along this trajectory
        for d in range(self.n_dmps):
            f_target[:, d] = (ddy_des[d] - self.ay[d] *
                              (self.by[d] * (self.goal[d] - y_des[d]) -
                              dy_des[d]))
        
        # efficiently generate weights to realize f_target
        self.gen_weights(f_target)

        self.reset_state()
        return self.w
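
The imitation step above pairs np.linspace time stamps with scipy's interp1d to resample a demonstrated path onto the DMP's own clock. A self-contained sketch, assuming run_time=1.0 and a sine demonstration:

import numpy as np
import scipy.interpolate

run_time, timesteps = 1.0, 100
y_demo = np.sin(np.linspace(0, np.pi, 20))       # coarse demonstrated trajectory
x = np.linspace(0, run_time, y_demo.shape[0])    # its time stamps
path_gen = scipy.interpolate.interp1d(x, y_demo)
dt = run_time / timesteps
path = np.array([path_gen(t * dt) for t in range(timesteps)])  # resampled onto uniform steps
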