Python scipy.integrate.cumtrapz() Examples

The following are 28 code examples of scipy.integrate.cumtrapz(), drawn from open-source projects; the source file and originating project are noted above each example.
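For orientation, cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None) computes the cumulative integral of y with the trapezoid rule, using either the sample points x or the constant spacing dx; passing initial prepends that value so the output has the same length as the input. Recent SciPy releases expose this routine as scipy.integrate.cumulative_trapezoid, and the older cumtrapz name has since been deprecated. A minimal sketch of the basic call:

import numpy as np
from scipy.integrate import cumtrapz

x = np.linspace(0.0, np.pi, 100)
y = np.sin(x)

# Running integral of sin(x); initial=0 makes the output the same length as x.
F = cumtrapz(y, x, initial=0)
print(F[-1])  # ~2.0; the exact integral of sin(x) over [0, pi] is 2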
Example #1
Source File: sm_utils.py    From gmpe-smtk with GNU Affero General Public License v3.0
def get_velocity_displacement(time_step, acceleration, units="cm/s/s",
                              velocity=None, displacement=None):
    '''
    Returns the velocity and displacement time series using simple integration
    :param float time_step:
        Time-series time-step (s)
    :param numpy.ndarray acceleration:
        Acceleration time-history
    :returns:
        velocity - Velocity Time series (cm/s)
        displacement - Displacement Time series (cm)
    '''
    acceleration = convert_accel_units(acceleration, units)
    if velocity is None:
        velocity = time_step * cumtrapz(acceleration, initial=0.)
    if displacement is None:
        displacement = time_step * cumtrapz(velocity, initial=0.)
    return velocity, displacement 
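As a side note on the pattern above: scaling the unit-spaced integral by time_step is equivalent to handing the spacing to cumtrapz directly through dx. A small sketch with a synthetic, purely illustrative acceleration record:

import numpy as np
from scipy.integrate import cumtrapz

time_step = 0.01                       # s
acceleration = np.random.randn(1000)   # illustrative record, cm/s/s

# Multiplying the unit-spaced integral by the time step ...
velocity_a = time_step * cumtrapz(acceleration, initial=0.)
# ... gives the same result as passing the spacing as dx.
velocity_b = cumtrapz(acceleration, dx=time_step, initial=0.)

assert np.allclose(velocity_a, velocity_b)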
Example #2
Source File: csr.py    From ocelot with GNU General Public License v3.0
def K0_fin_anf_opt(self, i, traj, wmin, gamma):
        s = np.zeros(i)
        n = np.zeros((i, 3))
        R = np.zeros(i)
        w = np.zeros(i)
        self.K0_0(i, traj[0], traj[1], traj[2], traj[3], gamma, s, n, R, w)
        j = np.where(w <= wmin)[0]

        if len(j) > 0:
            j = j[-1]
            w = w[j:i]
            s = s[j:i]
        else:
            j = 0
        K = self.K0_1(i, j, R, n, traj[4], traj[5], traj[6], w, gamma)

        if len(K) > 1:
            a = np.append(0.5 * (K[0:-1] + K[1:]) * np.diff(s), 0.5 * K[-1] * s[-1])
            KS = np.cumsum(a[::-1])[::-1]
            # KS = cumsum_inv_jit(a)
            # KS = cumtrapz(K[::-1], -s[::-1], initial=0)[::-1] + 0.5*K[-1]*s[-1]
        else:
            KS = 0.5 * K[-1] * s[-1]
        return w, KS 
Example #3
Source File: topology.py    From quantum-honeycomp with GNU General Public License v3.0
def chern_density(h,nk=10,operator=None,delta=0.02,dk=0.02,
        es=np.linspace(-1.0,1.0,40)):
  """Compute the Chern density as a function of the energy"""
  ks = klist.kmesh(h.dimensionality,nk=nk)
  cs = np.zeros(es.shape[0]) # initialize
  f = h.get_gk_gen(delta=delta,canonical_phase=True) # green function generator
  tr = timing.Testimator("CHERN DENSITY",maxite=len(ks))
  from . import parallel
  def fp(k): # compute berry curvatures
    if parallel.cores==1: tr.iterate()
    else: print(k)
#    k = np.random.random(3)
    fgreen = berry_green_generator(f,k=k,dk=dk,operator=operator,full=False) 
    return np.array([fgreen(e).real for e in es])
  out = parallel.pcall(fp,ks) # compute everything
  for o in out: cs += o # add contributions
  cs = cs/(len(ks)*np.pi*2) # normalize
  from scipy.integrate import cumtrapz
  csi = cumtrapz(cs,x=es,initial=0) # integrate
  np.savetxt("CHERN_DENSITY.OUT",np.matrix([es,cs]).T)
  np.savetxt("CHERN_DENSITY_INTEGRATED.OUT",np.matrix([es,csi]).T) 
Example #4
Source File: spectrum.py    From quantum-honeycomp with GNU General Public License v3.0
def set_filling(h,filling=0.5,nk=10,extrae=0.,delta=1e-1):
    """Set the filling of a Hamiltonian"""
    if h.has_eh: raise
    fill = filling + extrae/h.intra.shape[0] # filling
    n = h.intra.shape[0]
    use_kpm = False
    if n>algebra.maxsize: # use the KPM method
        use_kpm = True
        print("Using KPM in set_filling")
    if use_kpm: # use KPM
        es,ds = h.get_dos(energies=np.linspace(-5.0,5.0,1000),
                use_kpm=True,delta=delta,nk=nk,random=False)
        from scipy.integrate import cumtrapz
        di = cumtrapz(ds,es)
        ei = (es[0:len(es)-1] + es[1:len(es)])/2.
        di /= di[len(di)-1] # normalize
        from scipy.interpolate import interp1d
        f = interp1d(di,ei) # interpolating function
        efermi = f(fill) # get the fermi energy
    else: # dense Hamiltonian, use ED
        es = eigenvalues(h,nk=nk)
        efermi = get_fermi_energy(es,fill)
    h.shift_fermi(-efermi) # shift the fermi energy 
Example #5
Source File: test_quadrature.py    From Computable with MIT License
def test_nd(self):
        x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
        y = x
        y_int = cumtrapz(y, x, initial=0)
        y_expected = np.array([[[0., 0.5, 2., 4.5],
                                [0., 4.5, 10., 16.5]],
                               [[0., 8.5, 18., 28.5],
                                [0., 12.5, 26., 40.5]],
                               [[0., 16.5, 34., 52.5],
                                [0., 20.5, 42., 64.5]]])

        assert_allclose(y_int, y_expected)

        # Try with all axes
        shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
        for axis, shape in zip([0, 1, 2], shapes):
            y_int = cumtrapz(y, x, initial=3.45, axis=axis)
            assert_equal(y_int.shape, (3, 2, 4))
            y_int = cumtrapz(y, x, initial=None, axis=axis)
            assert_equal(y_int.shape, shape) 
Example #6
Source File: test_quadrature.py    From Computable with MIT License
def test_x_none(self):
        y = np.linspace(-2, 2, num=5)

        y_int = cumtrapz(y)
        y_expected = [-1.5, -2., -1.5, 0.]
        assert_allclose(y_int, y_expected)

        y_int = cumtrapz(y, initial=1.23)
        y_expected = [1.23, -1.5, -2., -1.5, 0.]
        assert_allclose(y_int, y_expected)

        y_int = cumtrapz(y, dx=3)
        y_expected = [-4.5, -6., -4.5, 0.]
        assert_allclose(y_int, y_expected)

        y_int = cumtrapz(y, dx=3, initial=1.23)
        y_expected = [1.23, -4.5, -6., -4.5, 0.]
        assert_allclose(y_int, y_expected) 
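The expected values above follow directly from the trapezoid rule: with y = [-2, -1, 0, 1, 2] and unit spacing, the first partial sum is (-2 + -1)/2 = -1.5, the next adds (-1 + 0)/2 = -0.5, and so on; dx=3 simply scales every trapezoid by 3, and initial prepends the given value. A quick sanity check of that reading:

import numpy as np
from scipy.integrate import cumtrapz

y = np.linspace(-2, 2, num=5)                      # [-2, -1, 0, 1, 2]
manual = np.cumsum(0.5 * (y[:-1] + y[1:]))         # trapezoid rule by hand
assert np.allclose(cumtrapz(y), manual)            # [-1.5, -2., -1.5, 0.]
assert np.allclose(cumtrapz(y, dx=3), 3 * manual)  # [-4.5, -6., -4.5, 0.]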
Example #7
Source File: report.py    From pyiron with BSD 3-Clause "New" or "Revised" License
def from_file(self, filename="REPORT"):
        """
        Reads values from files and stores it in the `parse_dict` attribute

        Args:
            filename (str): Path to the file that needs to be parsed
        """
        with open(filename, "r") as f:
            lines = f.readlines()
        rel_lines = [lines[i + 2] for i, line in enumerate(lines) if "Blue_moon" in line]
        if len(rel_lines) > 0:
            [lam, _, _, _] = [val for val in np.genfromtxt(rel_lines, usecols=[1, 2, 3, 4]).T]
            rel_lines = [lines[i] for i, line in enumerate(lines) if "cc>" in line]
            cv = np.genfromtxt(rel_lines, usecols=[2])
            fe = cumtrapz(lam, cv)
            self.parse_dict["cv_full"] = cv
            self.parse_dict["derivative"] = lam
            self.parse_dict["cv"] = cv[:-1]
            self.parse_dict["free_energy"] = fe 
Example #8
Source File: fitting.py    From grizli with MIT License
def compute_cdf_percentiles(fit, cdf_sigmas=CDF_SIGMAS):
    """
    Compute tabulated percentiles of the PDF
    """
    from scipy.interpolate import Akima1DInterpolator
    from scipy.integrate import cumtrapz
    import scipy.stats

    if cdf_sigmas is None:
        cdf_sigmas = CDF_SIGMAS

    cdf_y = scipy.stats.norm.cdf(cdf_sigmas)

    if len(fit['zgrid']) == 1:
        return np.ones_like(cdf_y)*fit['zgrid'][0], cdf_y

    spl = Akima1DInterpolator(fit['zgrid'], np.log(fit['pdf']), axis=1)
    zrfine = [fit['zgrid'].min(), fit['zgrid'].max()]
    zfine = utils.log_zgrid(zr=zrfine, dz=0.0001)
    ok = np.isfinite(spl(zfine))
    pz_fine = np.exp(spl(zfine))
    pz_fine[~ok] = 0

    cdf_fine = cumtrapz(pz_fine, x=zfine)
    cdf_x = np.interp(cdf_y, cdf_fine/cdf_fine[-1], zfine[1:])

    return cdf_x, cdf_y 
Example #9
Source File: science_utils.py    From oopt-gnpy with BSD 3-Clause "New" or "Revised" License
def _int_spontaneous_raman(self, z_array, raman_matrix, alphap_fiber, freq_array,
                               cr_raman_matrix, freq_diff, ase_bc, bn_array, temperature):
        spontaneous_raman_scattering = OptimizeResult()

        simulation = Simulation.get_simulation()
        sim_params = simulation.sim_params

        dx = sim_params.raman_params.space_resolution
        h = ph.value('Planck constant')
        kb = ph.value('Boltzmann constant')

        power_ase = np.nan * np.ones(raman_matrix.shape)
        int_pump = cumtrapz(raman_matrix, z_array, dx=dx, axis=1, initial=0)

        for f_ind, f_ase in enumerate(freq_array):
            cr_raman = cr_raman_matrix[f_ind, :]
            vibrational_loss = f_ase / freq_array[:f_ind]
            eta = 1 / (np.exp((h * freq_diff[f_ind, f_ind + 1:]) / (kb * temperature)) - 1)

            int_fiber_loss = -alphap_fiber[f_ind] * z_array
            int_raman_loss = np.sum((cr_raman[:f_ind] * vibrational_loss * int_pump[:f_ind, :].transpose()).transpose(),
                                    axis=0)
            int_raman_gain = np.sum((cr_raman[f_ind + 1:] * int_pump[f_ind + 1:, :].transpose()).transpose(), axis=0)

            int_gain_loss = int_fiber_loss + int_raman_gain + int_raman_loss

            new_ase = np.sum((cr_raman[f_ind + 1:] * (1 + eta) * raman_matrix[f_ind + 1:, :].transpose()).transpose()
                             * h * f_ase * bn_array[f_ind], axis=0)

            bc_evolution = ase_bc[f_ind] * np.exp(int_gain_loss)
            ase_evolution = np.exp(int_gain_loss) * cumtrapz(new_ase *
                                                             np.exp(-int_gain_loss), z_array, dx=dx, initial=0)

            power_ase[f_ind, :] = bc_evolution + ase_evolution

        spontaneous_raman_scattering.x = 2 * power_ase
        return spontaneous_raman_scattering 
Example #10
Source File: co2.py    From CO2MPAS-TA with European Union Public License 1.1
def calculate_batteries_phases_delta_energy(
        times, phases_indices, drive_battery_electric_powers,
        service_battery_electric_powers):
    """
    Calculates the phases delta energy of the batteries [Wh].

    :param times:
        Time vector.
    :type times: numpy.array

    :param phases_indices:
        Indices of the cycle phases [-].
    :type phases_indices: numpy.array

    :param drive_battery_electric_powers:
        Drive battery electric power [kW].
    :type drive_battery_electric_powers: numpy.array

    :param service_battery_electric_powers:
        Service battery electric power [kW].
    :type service_battery_electric_powers: numpy.array

    :return:
        Phases delta energy of the batteries [Wh].
    :rtype: numpy.array
    """
    from scipy.integrate import cumtrapz
    p = service_battery_electric_powers + drive_battery_electric_powers
    e = cumtrapz(p, times, initial=0)
    i = phases_indices.copy()
    i[:, 1] -= 1
    return np.diff(e[i], axis=1).ravel() / 3.6 
Example #11
Source File: base.py    From bilby with MIT License
def cdf(self, val):
        """ Generic method to calculate CDF, can be overwritten in subclass """
        if np.any(np.isinf([self.minimum, self.maximum])):
            raise ValueError(
                "Unable to use the generic CDF calculation for priors with"
                "infinite support")
        x = np.linspace(self.minimum, self.maximum, 1000)
        pdf = self.prob(x)
        cdf = cumtrapz(pdf, x, initial=0)
        interp = interp1d(x, cdf, assume_sorted=True, bounds_error=False,
                          fill_value=(0, 1))
        return interp(val) 
Example #12
Source File: interpolated.py    From bilby with MIT License
def _initialize_attributes(self):
        if np.trapz(self._yy, self.xx) != 1:
            logger.debug('Supplied PDF for {} is not normalised, normalising.'.format(self.name))
        self._yy /= np.trapz(self._yy, self.xx)
        self.YY = cumtrapz(self._yy, self.xx, initial=0)
        # Need last element of cumulative distribution to be exactly one.
        self.YY[-1] = 1
        self.probability_density = interp1d(x=self.xx, y=self._yy, bounds_error=False, fill_value=0)
        self.cumulative_distribution = interp1d(x=self.xx, y=self.YY, bounds_error=False, fill_value=(0, 1))
        self.inverse_cumulative_distribution = interp1d(x=self.YY, y=self.xx, bounds_error=True) 
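The pattern in this example (and in Example #13 below, and in invert_cdf in Example #20) is standard inverse-transform sampling: cumtrapz turns a tabulated PDF into a CDF, and interpolating x against that CDF maps uniform random numbers onto draws from the distribution. A minimal self-contained sketch of the idea, not tied to bilby's API, using an illustrative Gaussian PDF:

import numpy as np
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d

x = np.linspace(-5, 5, 1000)
pdf = np.exp(-0.5 * x ** 2)        # unnormalised Gaussian, for illustration
pdf /= np.trapz(pdf, x)            # normalise so the CDF ends near 1

cdf = cumtrapz(pdf, x, initial=0)
cdf[-1] = 1.0                      # pin the endpoint against round-off
inverse_cdf = interp1d(cdf, x)

samples = inverse_cdf(np.random.rand(10000))  # approximately N(0, 1) draws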
Example #13
Source File: prior.py    From bilby with MIT License
def _build_attributes(self):
        """
        Method that builds the inverse cdf of the P(pixel) distribution for rescaling
        """
        yy = self._all_interped(self.pix_xx)
        yy /= np.trapz(yy, self.pix_xx)
        YY = cumtrapz(yy, self.pix_xx, initial=0)
        YY[-1] = 1
        self.inverse_cdf = interp1d(x=YY, y=self.pix_xx, bounds_error=True) 
Example #14
Source File: viscio.py    From PyLAT with GNU General Public License v3.0
def viscosity(self,cutoff):

        """
            cutoff: initial lines ignored during the calculation
            output: returns arrays for the time and the running integral, which is
                    the viscosity in cP
        """

        NCORES=1
        p=Pool(NCORES)

        numtimesteps = len(self.llog['pxy'])
        calcsteps = np.floor((numtimesteps-cutoff)/10000)*10000
        cutoff = int(numtimesteps - calcsteps)
        a1=self.llog['pxy'][cutoff:]
        a2=self.llog['pxz'][cutoff:]
        a3=self.llog['pyz'][cutoff:]
        a4=self.llog['pxx'][cutoff:]-self.llog['pyy'][cutoff:]
        a5=self.llog['pyy'][cutoff:]-self.llog['pzz'][cutoff:]
        a6=self.llog['pxx'][cutoff:]-self.llog['pzz'][cutoff:]
        array_array=[a1,a2,a3,a4,a5,a6]
        pv=p.map(autocorrelate,array_array)
        pcorr = (pv[0]+pv[1]+pv[2])/6+(pv[3]+pv[4]+pv[5])/24
   	 
        temp=np.mean(self.llog['temp'][cutoff:])
       
        visco = (cumtrapz(pcorr,self.llog['step'][:len(pcorr)]))*self.llog['timestep']*10**-15*1000*101325.**2*self.llog['vol'][-1]*10**-30/(1.38*10**-23*temp)
        Time = np.array(self.llog['step'][:len(pcorr)-1])*self.llog['timestep']
        p.close()
        return (Time,visco) 
Example #15
Source File: getCoordinationNumber.py    From PyLAT with GNU General Public License v3.0
def integrate(self, g,r, nummoltype, moltypel, V, mol):
        #integrates the radial distribution functions
        integrallist = []
        for i in range(0,len(g)):
            integrallist.append(g[i]*nummoltype[moltypel.index(mol)]/V*4*numpy.pi*r[i]**2)
        integral = cumtrapz(integrallist, x = r)
        integral= integral.tolist()
        return integral 
Example #16
Source File: calcCond.py    From PyLAT with GNU General Public License v3.0
def integrateJ(self, J, dt):
        #Integrates the charge flux correlation function to calculate conductivity
        integral = np.zeros((len(J),len(J[0])))
        for i in range(0,len(J)):
            integral[i][1:] = cumtrapz(J[i], dx=dt)
        return integral 
Example #17
Source File: intensity_measures.py    From gmpe-smtk with GNU Affero General Public License v3.0
def get_peak_measures(time_step, acceleration, get_vel=False, 
    get_disp=False):
    """
    Returns the peak measures from acceleration, velocity and displacement
    time-series
    :param float time_step:
        Time step of acceleration time series in s
    :param numpy.ndarray acceleration:
        Acceleration time series
    :param bool get_vel:
        Choose whether to return (and therefore calculate) the velocity time
        series (True) or not (False)
    :returns:
        * pga - Peak Ground Acceleration
        * pgv - Peak Ground Velocity
        * pgd - Peak Ground Displacement
        * velocity - Velocity Time Series
        * displacement - Displacement Time series
    """
    pga = np.max(np.fabs(acceleration))
    velocity = None
    displacement = None
    # If displacement is not required then do not integrate to get
    # displacement time series
    if get_disp:
        get_vel = True
    if get_vel:
        velocity = time_step * cumtrapz(acceleration, initial=0.)
        pgv = np.max(np.fabs(velocity))
    else:
        pgv = None
    if get_disp:
        displacement = time_step * cumtrapz(velocity, initial=0.)
        pgd = np.max(np.fabs(displacement))
    else:
        pgd = None
    return pga, pgv, pgd, velocity, displacement 
Example #18
Source File: qseis2d.py    From beat with GNU General Public License v3.0
def get_traces(self):

        fn = self.config.get_output_filename(self.tempdir)
        data = num.loadtxt(fn, skiprows=1, dtype=num.float)
        nsamples, ntraces = data.shape
        deltat = (data[-1, 0] - data[0, 0]) / (nsamples - 1)
        toffset = data[0, 0]

        tred = self.config.time_reduction
        rec = self.config.receiver
        tmin = rec.tstart + toffset + deltat + tred

        traces = []
        for itrace, comp in enumerate(self.config.components):
            # qseis2d gives velocity; integrate to displacement
            # integration removes one sample, add it again in front
            displ = cumtrapz(num.concatenate(
                (num.zeros(1), data[:, itrace + 1])), dx=deltat)

            tr = trace.Trace(
                '', '%04i' % itrace, '', comp,
                tmin=tmin, deltat=deltat, ydata=displ,
                meta=dict(distance=rec.distance,
                          azimuth=0.0))

            traces.append(tr)

        return traces 
Example #19
Source File: test_quadrature.py    From Computable with MIT License
def test_1d(self):
        x = np.linspace(-2, 2, num=5)
        y = x
        y_int = cumtrapz(y, x, initial=0)
        y_expected = [0., -1.5, -2., -1.5, 0.]
        assert_allclose(y_int, y_expected)

        y_int = cumtrapz(y, x, initial=None)
        assert_allclose(y_int, y_expected[1:]) 
Example #20
Source File: math_op.py    From ocelot with GNU General Public License v3.0
def invert_cdf(y, x):
    """
    Invert cumulative distribution function of the probability distribution

    Example:
    --------
    # analytical formula for the beam distribution
    f = lambda x: A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))

    # we are interesting in range from -30 to 30 e.g. [um]
    x = np.linspace(-30, 30, num=100)

    # Inverted cumulative distribution function
    i_cdf = invert_cdf(y=f(x), x=x)

    # get beam distribution (200 000 coordinates)
    tau = i_cdf(np.random.rand(200000))


    :param y: array, [y0, y1, y2, ... yn] yi = y(xi)
    :param x: array, [x0, x1, x2, ... xn] xi
    :return: function
    """

    cum_int = integrate.cumtrapz(y, x, initial=0)
    #print("invert", np.max(cum_int), np.min(cum_int))
    cdf = cum_int / (np.max(cum_int) - np.min(cum_int))
    inv_cdf = interpolate.interp1d(cdf, x)
    return inv_cdf 
Example #21
Source File: radiation_py.py    From ocelot with GNU General Public License v3.0
def s(self, n=0):
        self.check(n)

        xp2 = self.xp(n) ** 2
        yp2 = self.yp(n) ** 2
        # zp = np.sqrt(1. / (1. + xp2 + yp2))
        s = cumtrapz(np.sqrt(1. + xp2 + yp2), self.z(n), initial=0)
        return s 
Example #22
Source File: intensity_measures.py    From gmpe-smtk with GNU Affero General Public License v3.0
def get_husid(acceleration, time_step):
    """
    Returns the Husid vector, defined as \int{acceleration ** 2.}
    :param numpy.ndarray acceleration:
        Vector of acceleration values
    :param float time_step:
        Time-step of record (s)
    """
    time_vector = get_time_vector(time_step, len(acceleration))
    husid = np.hstack([0., cumtrapz(acceleration ** 2., time_vector)])
    return husid, time_vector 
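Prepending the zero by hand, as above, is equivalent to asking cumtrapz for it via initial=0; a quick check with an illustrative record:

import numpy as np
from scipy.integrate import cumtrapz

t = np.arange(0.0, 10.0, 0.01)     # illustrative time vector (s)
a = np.sin(2 * np.pi * t)          # illustrative acceleration record

assert np.allclose(np.hstack([0., cumtrapz(a ** 2., t)]),
                   cumtrapz(a ** 2., t, initial=0.))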
Example #23
Source File: simple_beam_metric.py    From Structural-Engineering with BSD 3-Clause "New" or "Revised" License
def moment_area(self, *event):
        if self.has_run == 1:
            pass
        else:
            self.run()

        ml = self.momentl
        mc = self.momentc
        mr = self.momentr

        xsl = self.xsl
        xsc = self.xsc - xsl[-1]
        xsr = self.xsr - self.xsc[-1]

        al = sci_int.cumtrapz(ml, xsl, initial = 0)
        ac = sci_int.cumtrapz(mc, xsc, initial = 0)
        ar = sci_int.cumtrapz(mr, xsr, initial = 0)

        m_xl = []
        m_xc = []
        m_xr = []

        for i in range(0,len(xsl)):
            m_xl.append(ml[i]*xsl[i])
            m_xc.append(mc[i]*xsc[i])
            m_xr.append(mr[i]*xsr[i])

        m_xl_a = sci_int.cumtrapz(m_xl, xsl, initial = 0)
        m_xc_a = sci_int.cumtrapz(m_xc, xsc, initial = 0)
        m_xr_a = sci_int.cumtrapz(m_xr, xsr, initial = 0)

        xl_l = (1/al[-1])*m_xl_a[-1]
        xl_c = (1/ac[-1])*m_xc_a[-1]
        xl_r = (1/ar[-1])*m_xr_a[-1]

        xr_l = xsl[-1] - xl_l
        xr_c = xsc[-1] - xl_c
        xr_r = xsr[-1] - xl_r

        res_string = 'Left:\nA = {0:.4f}   Xleft = {1:.4f} m    Xright = {2:.4f} m\n'.format(al[-1],xl_l,xr_l)
        res_string = res_string+'Center:\nA = {0:.4f}   Xleft = {1:.4f} m    Xright = {2:.4f} m\n'.format(ac[-1],xl_c,xr_c)
        res_string = res_string+'Right:\nA = {0:.4f}   Xleft = {1:.4f} m    Xright = {2:.4f} m\n'.format(ar[-1],xl_r,xr_r)

        self.moment_area_label.configure(text=res_string) 
Example #24
Source File: simple_beam.py    From Structural-Engineering with BSD 3-Clause "New" or "Revised" License
def moment_area(self, *event):
        if self.has_run == 1:
            pass
        else:
            self.run()

        ml = self.momentl
        mc = self.momentc
        mr = self.momentr

        xsl = self.xsl
        xsc = self.xsc - xsl[-1]
        xsr = self.xsr - self.xsc[-1]

        al = sci_int.cumtrapz(ml, xsl, initial = 0)
        ac = sci_int.cumtrapz(mc, xsc, initial = 0)
        ar = sci_int.cumtrapz(mr, xsr, initial = 0)

        m_xl = []
        m_xc = []
        m_xr = []

        for i in range(0,len(xsl)):
            m_xl.append(ml[i]*xsl[i])
            m_xc.append(mc[i]*xsc[i])
            m_xr.append(mr[i]*xsr[i])

        m_xl_a = sci_int.cumtrapz(m_xl, xsl, initial = 0)
        m_xc_a = sci_int.cumtrapz(m_xc, xsc, initial = 0)
        m_xr_a = sci_int.cumtrapz(m_xr, xsr, initial = 0)

        xl_l = (1/al[-1])*m_xl_a[-1]
        xl_c = (1/ac[-1])*m_xc_a[-1]
        xl_r = (1/ar[-1])*m_xr_a[-1]

        xr_l = xsl[-1] - xl_l
        xr_c = xsc[-1] - xl_c
        xr_r = xsr[-1] - xl_r

        res_string = 'Left:\nA = {0:.4f}   Xleft = {1:.4f} ft    Xright = {2:.4f} ft\n'.format(al[-1],xl_l,xr_l)
        res_string = res_string+'Center:\nA = {0:.4f}   Xleft = {1:.4f} ft    Xright = {2:.4f} ft\n'.format(ac[-1],xl_c,xr_c)
        res_string = res_string+'Right:\nA = {0:.4f}   Xleft = {1:.4f} ft    Xright = {2:.4f} ft\n'.format(ar[-1],xl_r,xr_r)

        self.moment_area_label.configure(text=res_string) 
Example #25
Source File: spectral_mixture_kernel.py    From gpytorch with MIT License
def initialize_from_data_empspect(self, train_x, train_y):
        """
        Initialize mixture components based on the empirical spectrum of the data.

        This will often be better than the standard initialize_from_data method.
        """
        import numpy as np
        from scipy.fftpack import fft
        from scipy.integrate import cumtrapz

        if not torch.is_tensor(train_x) or not torch.is_tensor(train_y):
            raise RuntimeError("train_x and train_y should be tensors")
        if train_x.ndimension() == 1:
            train_x = train_x.unsqueeze(-1)

        N = train_x.size(-2)
        emp_spect = np.abs(fft(train_y.cpu().detach().numpy())) ** 2 / N
        M = math.floor(N / 2)

        freq1 = np.arange(M + 1)
        freq2 = np.arange(-M + 1, 0)
        freq = np.hstack((freq1, freq2)) / N
        freq = freq[: M + 1]
        emp_spect = emp_spect[: M + 1]

        total_area = np.trapz(emp_spect, freq)
        spec_cdf = np.hstack((np.zeros(1), cumtrapz(emp_spect, freq)))
        spec_cdf = spec_cdf / total_area

        a = np.random.rand(1000, self.ard_num_dims)
        p, q = np.histogram(a, spec_cdf)
        bins = np.digitize(a, q)
        slopes = (spec_cdf[bins] - spec_cdf[bins - 1]) / (freq[bins] - freq[bins - 1])
        intercepts = spec_cdf[bins - 1] - slopes * freq[bins - 1]
        inv_spec = (a - intercepts) / slopes

        from sklearn.mixture import GaussianMixture

        GMM = GaussianMixture(n_components=self.num_mixtures, covariance_type="diag").fit(inv_spec)
        means = GMM.means_
        varz = GMM.covariances_
        weights = GMM.weights_

        self.mixture_means = means
        self.mixture_scales = varz
        self.mixture_weights = weights 
Example #26
Source File: density_profiles.py    From MCEq with BSD 3-Clause "New" or "Revised" License
def calculate_density_spline(self, n_steps=2000):
        """Calculates and stores a spline of :math:`\\rho(X)`.

        Args:
          n_steps (int, optional): number of :math:`X` values
                                   to use for interpolation

        Raises:
            Exception: if :func:`set_theta` was not called before.
        """
        from scipy.integrate import cumtrapz
        from time import time
        from scipy.interpolate import UnivariateSpline

        if self.theta_deg is None:
            raise Exception('zenith angle not set')
        else:
            info(
                5, 'Calculating spline of rho(X) for zenith {0:4.1f} degrees.'.
                format(self.theta_deg))

        thrad = self.thrad
        path_length = self.geom.l(thrad)
        vec_rho_l = np.vectorize(
            lambda delta_l: self.get_density(self.geom.h(delta_l, thrad)))
        dl_vec = np.linspace(0, path_length, n_steps)

        now = time()

        # Calculate integral for each depth point
        X_int = cumtrapz(vec_rho_l(dl_vec), dl_vec)  #
        dl_vec = dl_vec[1:]

        info(5, '.. took {0:1.2f}s'.format(time() - now))

        # Save depth value at h_obs
        self._max_X = X_int[-1]
        self._max_den = self.get_density(self.geom.h(0, thrad))

        # Interpolate with bi-splines without smoothing
        h_intp = [self.geom.h(dl, thrad) for dl in reversed(dl_vec[1:])]
        X_intp = [X for X in reversed(X_int[1:])]

        self._s_h2X = UnivariateSpline(h_intp, np.log(X_intp), k=2, s=0.0)
        self._s_X2rho = UnivariateSpline(X_int, vec_rho_l(dl_vec), k=2, s=0.0)
        self._s_lX2h = UnivariateSpline(np.log(X_intp)[::-1],
                                       h_intp[::-1],
                                       k=2,
                                       s=0.0) 
Example #27
Source File: csr.py    From ocelot with GNU General Public License v3.0
def K0_fin_anf_numexpr(self, i, traj, wmin, gamma):
        # function [ w,KS ] = K0_inf_anf( i,traj,wmin,gamma )

        g2i = 1. / gamma ** 2
        b2 = 1. - g2i
        beta = np.sqrt(b2)
        i1 = i - 1  # ignore points i1+1:i on linear path to observer
        # ra = np.arange(0, i1+1)
        ind1 = i1 + 1
        s = traj[0, 0:ind1] - traj[0, i]
        n0 = traj[1, i] - traj[1, 0:ind1]
        n1 = traj[2, i] - traj[2, 0:ind1]
        n2 = traj[3, i] - traj[3, 0:ind1]
        R = ne.evaluate("sqrt(n0**2 + n1**2 + n2**2)")

        w = ne.evaluate('s + beta*R')
        j = np.where(w <= wmin)[0]

        if len(j) > 0:
            j = j[-1]
            w = w[j:ind1]
            s = s[j:ind1]
        else:
            j = 0
        R = R[j:ind1]
        n0 = n0[j:ind1] / R
        n1 = n1[j:ind1] / R
        n2 = n2[j:ind1] / R

        # kernel
        t4 = traj[4, j:i1 + 1]
        t5 = traj[5, j:i1 + 1]
        t6 = traj[6, j:i1 + 1]

        x = ne.evaluate('n0*t4 + n1*t5 + n2*t6')

        t4i = traj[4, i]
        t5i = traj[5, i]
        t6i = traj[6, i]
        K = ne.evaluate(
            '((beta*(x - n0*t4i- n1*t5i - n2*t6i) - b2*(1. - t4*t4i - t5*t5i - t6*t6i) - g2i)/R - (1. - beta*x)/w*g2i)')

        if len(K) > 1:
            a = np.append(0.5 * (K[0:-1] + K[1:]) * np.diff(s), 0.5 * K[-1] * s[-1])
            KS = np.cumsum(a[::-1])[::-1]
            # KS = cumtrapz(K[::-1], -s[::-1], initial=0)[::-1] + 0.5*K[-1]*s[-1]
        else:
            KS = 0.5 * K[-1] * s[-1]

        return w, KS 
Example #28
Source File: csr.py    From ocelot with GNU General Public License v3.0
def K0_fin_anf_np(self, i, traj, wmin, gamma):
        # function [ w,KS ] = K0_inf_anf( i,traj,wmin,gamma )

        g2i = 1. / gamma ** 2
        b2 = 1. - g2i
        beta = np.sqrt(b2)
        i1 = i - 1  # ignore points i1+1:i on linear path to observer
        ind1 = i1 + 1
        s = traj[0, 0:ind1] - traj[0, i]
        n = np.array([traj[1, i] - traj[1, 0:ind1],
                      traj[2, i] - traj[2, 0:ind1],
                      traj[3, i] - traj[3, 0:ind1]])
        R = np.sqrt(np.sum(n ** 2, axis=0))

        w = s + beta * R
        j = np.where(w <= wmin)[0]

        if len(j) > 0:
            j = j[-1]
            w = w[j:ind1]
            s = s[j:ind1]
        else:
            j = 0
        R = R[j:ind1]
        n0 = n[0, j:ind1] / R
        n1 = n[1, j:ind1] / R
        n2 = n[2, j:ind1] / R

        # kernel
        t4 = traj[4, j:i1 + 1]
        t5 = traj[5, j:i1 + 1]
        t6 = traj[6, j:i1 + 1]

        x = n0 * t4 + n1 * t5 + n2 * t6
        K = ((beta * (x - n0 * traj[4, i] - n1 * traj[5, i] - n2 * traj[6, i]) -
              b2 * (1. - t4 * traj[4, i] - t5 * traj[5, i] - t6 * traj[6, i]) - g2i) / R - (1. - beta * x) / w * g2i)

        # K = ((beta*(n0*(t4 - traj[4, i]) +
        #            n1*(t5 - traj[5, i]) +
        #            n2*(t6 - traj[6, i])) -
        #    b2*(1. - t4*traj[4, i] - t5*traj[5, i] - t6*traj[6, i]) - g2i)/R[ra] -
        #    (1. - beta*(n0*t4 + n1*t5 + n2*t6))/w*g2i)

        # integrated kernel: KS=int_s^0{K(u)*du}=int_0^{-s}{K(-u)*du}

        if len(K) > 1:
            a = np.append(0.5 * (K[0:-1] + K[1:]) * np.diff(s), 0.5 * K[-1] * s[-1])
            KS = np.cumsum(a[::-1])[::-1]
            # KS = cumtrapz(K[::-1], -s[::-1], initial=0)[::-1] + 0.5*K[-1]*s[-1]
        else:
            KS = 0.5 * K[-1] * s[-1]

        return w, KS