Python numpy.trapz() Examples

The following are 30 code examples showing how to use numpy.trapz(). They are extracted from open source projects; the originating project, author, source file, and license are listed above each example.

You may also want to check out all available functions and classes of the numpy module.
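
Before the project examples, here is a minimal standalone sketch of the function itself: numpy.trapz approximates a definite integral with the trapezoidal rule, taking either a uniform spacing dx or an explicit coordinate array x.

import numpy as np

# Integrate y = x**2 over [0, 1]; the exact value is 1/3.
x = np.linspace(0.0, 1.0, 101)
y = x ** 2

area_dx = np.trapz(y, dx=x[1] - x[0])   # uniform spacing given as dx
area_x = np.trapz(y, x=x)               # explicit sample coordinates
print(area_dx, area_x)                  # both close to 0.3333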

Example 1
Project: typhon   Author: atmtools   File: qrnn.py    License: MIT License
def posterior_mean(self, x):
        r"""
        Computes the posterior mean by computing the first moment of the
        estimated posterior CDF.

        Arguments:

            x(np.array): Array of shape `(n, m)` containing `n` inputs for which
                         to predict the posterior mean.
        Returns:

            Array containing the posterior means for the provided inputs.
        """
        y_pred, qs = self.cdf(x)
        mus = y_pred[-1] - np.trapz(qs, x=y_pred)
        return mus 
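
The subtraction in posterior_mean uses the identity E[Y] = y_max − ∫ F(y) dy, which holds when the estimated CDF is approximately 0 at the first predicted quantile and approximately 1 at the last. A standalone sanity check of that identity, using SciPy's normal CDF purely as a stand-in distribution (not part of typhon's API):

import numpy as np
from scipy.stats import norm  # stand-in test distribution, just for the check

y = np.linspace(-4.0, 8.0, 2001)      # grid covering the support of N(2, 1)
F = norm.cdf(y, loc=2.0, scale=1.0)   # CDF values, ~0 at the left end, ~1 at the right

mean_estimate = y[-1] - np.trapz(F, x=y)
print(mean_estimate)                  # close to the true mean, 2.0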
Example 2
Project: pulse2percept   Author: pulse2percept   File: test_base.py    License: BSD 3-Clause "New" or "Revised" License
def test_gamma():
    tsample = 0.005 / 1000

    with pytest.raises(ValueError):
        t, g = gamma(0, 0.1, tsample)
    with pytest.raises(ValueError):
        t, g = gamma(2, -0.1, tsample)
    with pytest.raises(ValueError):
        t, g = gamma(2, 0.1, -tsample)

    for tau in [0.001, 0.01, 0.1]:
        for n in [1, 2, 5]:
            t, g = gamma(n, tau, tsample)
            npt.assert_equal(np.arange(0, t[-1] + tsample / 2.0, tsample), t)
            if n > 1:
                npt.assert_equal(g[0], 0.0)

            # Make sure area under the curve is normalized
            npt.assert_almost_equal(np.trapz(np.abs(g), dx=tsample), 1.0,
                                    decimal=2)

            # Make sure peak sits correctly
            npt.assert_almost_equal(g.argmax() * tsample, tau * (n - 1)) 
Example 3
Project: typhon   Author: atmtools   File: common.py    License: MIT License
def integrate_column(y, x=None, axis=0):
    """Integrate array along an arbitrary axis.

    Note:
        This function is just a wrapper for :func:`numpy.trapz`.

    Parameters:
        y (ndarray): Data array.
        x (ndarray): Coordinate array.
        axis (int): Axis to integrate along for multidimensional input.

    Returns:
        float or ndarray: Column integral.

    Examples:
        >>> import numpy as np
        >>> x = np.linspace(0, 1, 5)
        >>> y = np.arange(5)
        >>> integrate_column(y)
        8.0
        >>> integrate_column(y, x)
        2.0
    """
    return np.trapz(y, x, axis=axis) 
Example 4
Project: pybids   Author: bids-standard   File: test_transformations.py    License: MIT License
def test_resample(collection):
    coll = collection.clone()

    transform.ToDense(coll, 'parametric gain', output='pg_dense')
    pg = coll.variables['pg_dense']
    old_shape = pg.values.shape
    old_auc = np.trapz(np.abs(pg.values.values.squeeze()), dx=0.1)
    transform.Resample(coll, 'pg_dense', 1)
    pg = coll.variables['pg_dense']
    new_shape = pg.values.shape
    # Spacing (dx) is 10x larger when downsampling from 10 Hz to 1 Hz
    new_auc = np.trapz(np.abs(pg.values.values.squeeze()), dx=1)

    # Shape from 10 Hz to 1 Hz
    assert new_shape[0] == old_shape[0] / 10

    # Assert that the auc is more or less the same (not exact, rounding error)
    # Values are around 0.25
    assert np.allclose(old_auc, new_auc, rtol=0.05) 
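
The invariance this test relies on is that, for a reasonably smooth signal, the trapezoidal area is insensitive to decimation as long as dx is rescaled accordingly. A minimal sketch of that point, independent of pybids:

import numpy as np

t = np.arange(0.0, 10.0, 0.1)        # 10 Hz sampling
y = np.exp(-(t - 5.0) ** 2)          # a smooth bump

auc_10hz = np.trapz(y, dx=0.1)
auc_1hz = np.trapz(y[::10], dx=1.0)  # every 10th sample, dx scaled by 10

print(auc_10hz, auc_1hz)             # nearly identical areas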
Example 5
Project: ocelot   Author: ocelot-collab   File: genesis4.py    License: GNU General Public License v3.0
def calc_spec(self, zi=None, loc='near', npad=1, estimate_ph_sp_dens=1):

        field = self.rad_field(zi=zi, loc=loc)
        axis = field.ndim - 1

        spec = np.abs(np.fft.fft(field, axis=axis))**2
        spec = np.fft.fftshift(spec, axes=axis)

        scale_ev = h_eV_s * speed_of_light * (np.fft.fftfreq(self.nSlices, d=self.s[1]-self.s[0]) + 1 / self.lambdaref)
        scale_ev = np.fft.fftshift(scale_ev)

        if estimate_ph_sp_dens:
            tt = np.trapz(spec, scale_ev, axis=axis)
            if axis == 1:
                tt[tt == 0] = np.inf
                spec *= (self.n_photons / tt)[:, np.newaxis]
            else:
                if tt == 0:
                    tt = np.inf
                spec *= (self.n_photons[zi] / tt)

        return scale_ev, spec 
Example 6
Project: rainymotion   Author: hydrogo   File: metrics.py    License: MIT License
def AUC(x, y):
    '''
    AUC - area under curve

    Note: area under the curve, computed by the standard trapezoidal
          method (np.trapz)

    Args:

        x (numpy.ndarray): array of one metric rate (1D)
        y (numpy.ndarray): array of another metric rate (1D)

    Returns:

        float - area under curve

    '''

    return np.trapz(y, x) 
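
As a usage note, the same one-liner scores any rate curve swept along a threshold; here is a small sketch with made-up rates rather than rainymotion output:

import numpy as np

# Hypothetical rates collected while sweeping a decision threshold.
far = np.array([0.0, 0.1, 0.25, 0.5, 1.0])    # x: false alarm rate
pod = np.array([0.0, 0.55, 0.8, 0.95, 1.0])   # y: probability of detection

print(AUC(far, pod))   # trapezoidal area under the curve, about 0.84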
Example 7
Project: rampy   Author: charlesll   File: test_tlcorrection.py    License: GNU General Public License v2.0
def test_tlcorrection(self):
        # testing long correction function
        x_for_long = np.array([20.,21.,22.,23.,24.,25.])
        y_for_long = np.array([1.0,1.0,1.0,1.0,1.0,1.0])

        nu0 = 1.0/(514.532)*1e9  # laser wavenumber (m-1) at 514.532 nm
        nu = 100.0*x_for_long # cm-1 to m-1
        T = 23.0+273.15 # the temperature in K

        x_long,long_res,eselong = rp.tlcorrection(x_for_long, y_for_long,23.0,514.532,correction = 'long',normalisation='area') # using the function
        t0 = nu0**3.0*nu/((nu0-nu)**4)
        t1= 1.0 - np.exp(-h*c*nu/(k*T)) # c in m/s  : t1 dimensionless
        long_calc = y_for_long*t0*t1  # for the y values
        long_calc = long_calc/np.trapz(long_calc,x_for_long) # area normalisation

        np.testing.assert_equal(long_res,long_calc)
        np.testing.assert_equal(x_for_long,x_long)

        x_long,long_res,eselong = rp.tlcorrection(x_for_long, y_for_long,23.0,514.532,correction = 'long',normalisation='no') # using the function
        t0 = nu0**3.0*nu/((nu0-nu)**4)
        t1= 1.0 - np.exp(-h*c*nu/(k*T)) # c in m/s  : t1 dimensionless
        long_calc = y_for_long*t0*t1  # for the y values

        np.testing.assert_equal(long_res,long_calc)
        np.testing.assert_equal(x_for_long,x_long) 
Example 8
Project: pyhawkes   Author: slinderman   File: basis.py    License: MIT License
def interpolate_basis(self, basis, dt, dt_max,
                          norm=True):
        # Interpolate basis at the resolution of the data
        L,B = basis.shape
        t_int = np.arange(0.0, dt_max, step=dt)
        t_bas = np.linspace(0.0, dt_max, L)

        ibasis = np.zeros((len(t_int), B))
        for b in np.arange(B):
            ibasis[:,b] = np.interp(t_int, t_bas, basis[:,b])

        # Normalize so that the interpolated basis has volume 1
        if norm:
            # ibasis /= np.trapz(ibasis,t_int,axis=0)
            ibasis /= (dt * np.sum(ibasis, axis=0))

        if not self.allow_instantaneous:
            # Typically, the impulse responses are applied to times
            # (t+1:t+R). That means we need to prepend a row of zeros to make
            # sure the basis remains causal
            ibasis = np.vstack((np.zeros((1,B)), ibasis))

        return ibasis 
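
The commented-out np.trapz line and the dt * sum line are not quite the same normalizer: the trapezoidal rule gives half weight to the first and last samples, whereas the Riemann-style dt * sum weights them fully. A small sketch of the difference, using generic arrays rather than pyhawkes objects:

import numpy as np

dt = 0.1
t = np.linspace(0.0, 1.0, 11)   # 11 samples with spacing dt
y = np.exp(-t)

riemann = dt * np.sum(y)        # the normalizer used above
trapezoid = np.trapz(y, t)      # the commented-out alternative

# On a uniform grid they differ by half the endpoint weights:
print(riemann - trapezoid, dt * (y[0] + y[-1]) / 2)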
Example 9
Project: pyhawkes   Author: slinderman   File: test_model.py    License: MIT License
def test_compute_rate():
    K = 1
    T = 100
    dt = 1.0
    network_hypers = {'c': np.zeros(K, dtype=int), 'p': 1.0, 'kappa': 10.0, 'v': 10*5.0}
    true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt,
                                                            network_hypers=network_hypers)
    S,R = true_model.generate(T=T)

    print("Expected number of events: ", np.trapz(R, dt * np.arange(T), axis=0))
    print("Actual number of events:   ", S.sum(axis=0))

    print("Lambda0:  ", true_model.bias_model.lambda0)
    print("W:        ", true_model.weight_model.W)
    print("")

    R_test = true_model.compute_rate()
    assert np.allclose(R, R_test) 
Example 10
Project: pyhawkes   Author: slinderman   File: test_model.py    License: MIT License
def test_generate_statistics():
    K = 1
    T = 100
    dt = 1.0
    network_hypers = {'c': np.zeros(K, dtype=int), 'p': 1.0, 'kappa': 10.0, 'v': 10*5.0}
    true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt,
                                                            network_hypers=network_hypers)
    S,R = true_model.generate(T=T)

    E_N = np.trapz(R, dt * np.arange(T), axis=0)
    std_N = np.sqrt(E_N)
    N = S.sum(axis=0)

    assert np.all(N >= E_N - 3*std_N), "N less than 3std below mean"
    assert np.all(N <= E_N + 3*std_N), "N more than 3std above mean"

    print("Expected number of events: ", E_N)
    print("Actual number of events:   ", S.sum(axis=0)) 
Example 11
Project: dmipy   Author: AthenaEPI   File: test_callaghan.py    License: MIT License
def test_RTAP_to_diameter_callaghan(samples=10000):
    mu = [0, 0]
    lambda_par = 1.7
    diameter = 10e-6

    delta = np.tile(1e-10, samples)  # delta towards zero
    Delta = np.tile(1e10, samples)  # Delta towards infinity
    qvals_perp = np.linspace(0, 10e6, samples)
    n_perp = np.tile(np.r_[1., 0., 0.], (samples, 1))
    scheme = acquisition_scheme_from_qvalues(qvals_perp, n_perp, delta, Delta)

    callaghan = cylinder_models.C3CylinderCallaghanApproximation(
        mu=mu, lambda_par=lambda_par, diameter=diameter)

    E_callaghan = callaghan(scheme)

    rtap_callaghan = 2 * np.pi * np.trapz(
        E_callaghan * qvals_perp, x=qvals_perp
    )

    diameter_callaghan = 2 / np.sqrt(np.pi * rtap_callaghan)
    assert_almost_equal(diameter_callaghan, diameter, 7) 
Example 12
Project: dmipy   Author: AthenaEPI   File: test_callaghan.py    License: MIT License
def test_RTPP_to_length_callaghan(samples=1000):
    length = 10e-6

    delta = 1e-10  # delta towards zero
    Delta = 1e10  # Delta towards infinity
    qvals_perp = np.linspace(0, 10e6, samples)
    n_perp = np.tile(np.r_[1., 0., 0.], (samples, 1))
    scheme = acquisition_scheme_from_qvalues(qvals_perp, n_perp, delta, Delta)

    plane = plane_models.P3PlaneCallaghanApproximation(diameter=length)
    E_callaghan = plane(scheme)

    rtpp_callaghan = 2 * np.trapz(E_callaghan, x=qvals_perp)

    length_callaghan = 1 / rtpp_callaghan
    assert_almost_equal(length_callaghan, length, 7) 
Example 13
Project: dmipy   Author: AthenaEPI   File: test_soderman.py    License: MIT License
def test_RTPP_to_diameter_soderman(samples=1000):
    """This tests if the RTPP of the plane relates correctly to the diameter
    of the plane."""
    diameter = 10e-6

    delta = np.tile(1e-10, samples)  # delta towards zero
    Delta = np.tile(1e10, samples)  # Delta towards infinity
    qvals_perp = np.linspace(0, 10e6, samples)
    n_perp = np.tile(np.r_[1., 0., 0.], (samples, 1))
    scheme = acquisition_scheme_from_qvalues(
        qvals_perp, n_perp, delta, Delta)

    soderman = plane_models.P2PlaneStejskalTannerApproximation(
        diameter=diameter)

    E_soderman = soderman(scheme)

    rtpp_soderman = 2 * np.trapz(
        E_soderman, x=qvals_perp
    )

    diameter_soderman = 1 / rtpp_soderman

    assert_almost_equal(diameter_soderman, diameter, 7) 
Example 14
Project: dmipy   Author: AthenaEPI   File: test_soderman.py    License: MIT License
def test_RTOP_to_diameter_soderman(samples=1000):
    """This tests if the RTAP of the sphere relates correctly to the diameter
    of the sphere."""
    diameter = 10e-6

    delta = np.tile(1e-10, samples)  # delta towards zero
    Delta = np.tile(1e10, samples)  # Delta towards infinity
    qvals_perp = np.linspace(0, 10e6, samples)
    n_perp = np.tile(np.r_[1., 0., 0.], (samples, 1))
    scheme = acquisition_scheme_from_qvalues(
        qvals_perp, n_perp, delta, Delta)

    soderman = sphere_models.S2SphereStejskalTannerApproximation(
        diameter=diameter)

    E_soderman = soderman(scheme)

    rtop_soderman = 4 * np.pi * np.trapz(
        E_soderman * qvals_perp ** 2, x=qvals_perp
    )

    sphere_volume = 1. / rtop_soderman
    diameter_soderman = 2 * (sphere_volume / ((4. / 3.) * np.pi)) ** (1. / 3.)

    assert_almost_equal(diameter_soderman, diameter, 7) 
Example 15
Project: tick   Author: X-DataInitiative   File: plot_hawkes.py    License: BSD 3-Clause "New" or "Revised" License
def _normalize_functions(y_values_list, t_values):
    """Normalize list of functions by their integral value

    Parameters
    ----------
    y_values_list : `list` of np.ndarray
        y values of the list of function we want to normalize

    t_values : `np.ndarray`
        t values shared by all functions given with y_values_list

    Returns
    -------
    normalized_y_values_list : `list` of np.ndarray
        Normalized y values of the given list of function

    normalizations : `np.ndarray`
        Normalization factors that have been used
    """
    y_values_list = np.array(y_values_list)
    normalizations = [
        1. / np.trapz(y_values, t_values) for y_values in y_values_list
    ]
    normalized_y_values_list = (y_values_list.T * normalizations).T
    return normalized_y_values_list, normalizations 
Example 16
Project: pruning_yolov3   Author: zbyuan   File: utils.py    License: GNU General Public License v3.0
def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap 
Example 17
Project: pointnet-registration-framework   Author: vinits5   File: plot_threshold_vs_success.py    License: MIT License
def make_plot(files, labels):
	plt.figure()
	AUC = []		# Calculate Area under curve for each test folder. (quantification of success)

	for file_idx in range(len(files)):
		rot_err, trans_err = read_csv(files[file_idx])
		success_dict = count_success_rot(rot_err)

		x_range = list(success_dict.keys())
		x_range.sort()
		success = []
		for i in x_range:
			success.append(success_dict[i])

		# Ratio of successful cases to total test cases.
		success = np.array(success)/total_cases

		area = np.trapz(success, dx=0.5)
		AUC.append(area)

		plt.plot(x_range, success, linewidth=6, label=labels[file_idx])
	
	plt.xlabel('Rotation Error for Success Criteria', fontsize=40)
	plt.ylabel('Success Ratio', fontsize=40)
	
	plt.tick_params(labelsize=40, width=3, length=10)
	
	plt.xticks(np.arange(0,180.5,30))
	plt.yticks(np.arange(0,1.1,0.2))
		
	plt.xlim(-0.5,180)
	plt.ylim(0,1.01)
	
	plt.grid(True)
	plt.legend(fontsize=30, loc=4)

	AUC = np.array(AUC)/180.0
	print('Area Under Curve values: {}'.format(AUC))
	np.savetxt('auc.txt',AUC) 
Example 18
Project: NeuroKit   Author: neuropsychology   File: entropy_multiscale.py    License: MIT License
def _entropy_multiscale(
    signal, scale="default", dimension=2, r="default", composite=False, fuzzy=False, refined=False, show=False, **kwargs
):

    r = _get_r(signal, r=r, dimension=dimension)
    scale_factors = _get_scale(signal, scale=scale, dimension=dimension)

    # Initialize mse vector
    mse = np.full(len(scale_factors), np.nan)
    for i, tau in enumerate(scale_factors):

        # Regular MSE
        if refined is False and composite is False:
            mse[i] = _entropy_multiscale_mse(signal, tau, dimension, r, fuzzy, **kwargs)

        # Composite MSE
        elif refined is False and composite is True:
            mse[i] = _entropy_multiscale_cmse(signal, tau, dimension, r, fuzzy, **kwargs)

        # Refined Composite MSE
        else:
            mse[i] = _entropy_multiscale_rcmse(signal, tau, dimension, r, fuzzy, **kwargs)

    if show is True:
        plt.plot(scale_factors, mse)

    # Remove inf, nan and 0
    mse = mse[~np.isnan(mse)]
    mse = mse[mse != np.inf]
    mse = mse[mse != -np.inf]

    # The MSE index is quantified as the area under the curve (AUC),
    # which is like the sum normalized by the number of values. It's similar to the mean.
    return np.trapz(mse) / len(mse)


# =============================================================================
# Methods
# ============================================================================= 
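
With the default unit spacing, np.trapz(mse) is just the plain sum minus half of the first and last values, which is why dividing by the number of scales behaves like a (slightly endpoint-discounted) mean. A quick check of that identity on a generic vector:

import numpy as np

mse = np.array([1.2, 1.0, 0.9, 0.85, 0.8])

print(np.trapz(mse))                         # 3.75
print(mse.sum() - (mse[0] + mse[-1]) / 2)    # same value
print(np.trapz(mse) / len(mse), mse.mean())  # AUC-based index vs plain mean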
Example 19
Project: NeuroKit   Author: neuropsychology   File: signal_power.py    License: MIT License
def _signal_power_instant_get(psd, frequency_band):

    indices = np.logical_and(
        psd["Frequency"] >= frequency_band[0], psd["Frequency"] < frequency_band[1]
    ).values  # pylint: disable=no-member

    out = {}
    out["{:.2f}-{:.2f}Hz".format(frequency_band[0], frequency_band[1])] = np.trapz(
        y=psd["Power"][indices], x=psd["Frequency"][indices]
    )
    out = {key: np.nan if value == 0.0 else value for key, value in out.items()}
    return out 
Example 20
Project: connecting_the_dots   Author: autonomousvision   File: metric.py    License: MIT License
def get(self):
    tps = np.array(self.tps).astype(np.float32)
    fps = np.array(self.fps).astype(np.float32)
    fns = np.array(self.fns).astype(np.float32)
    tns = np.array(self.tns).astype(np.float32)
    wp = self.thresholds

    ret = {}

    precisions = np.divide(tps, tps + fps, out=np.zeros_like(tps), where=tps + fps != 0)
    recalls = np.divide(tps, tps + fns, out=np.zeros_like(tps), where=tps + fns != 0) # tprs
    fprs = np.divide(fps, fps + tns, out=np.zeros_like(tps), where=fps + tns != 0)

    precisions = np.r_[0, precisions, 1]
    recalls = np.r_[1, recalls, 0]
    fprs = np.r_[1, fprs, 0]

    ret['auc'] = float(-np.trapz(recalls, fprs))
    ret['prauc'] = float(-np.trapz(precisions, recalls))
    ret['ap'] = float(-(np.diff(recalls) * precisions[:-1]).sum())

    accuracies = np.divide(tps + tns, tps + tns + fps + fns)
    aacc = np.mean(accuracies)
    for t in np.linspace(0,1,num=11)[1:-1]:
      idx = np.argmin(np.abs(t - wp))
      ret[f'acc{wp[idx]:.2f}'] = float(accuracies[idx])

    return ret 
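
The leading minus signs are deliberate: np.trapz returns a negative area when its x argument decreases along the array, and the recalls and fprs above are stacked from 1 down to 0. A minimal illustration of that sign convention:

import numpy as np

x = np.array([0.0, 0.5, 1.0])
y = np.array([0.0, 0.5, 1.0])

print(np.trapz(y, x))              # 0.5
print(np.trapz(y[::-1], x[::-1]))  # -0.5: same curve, x traversed backwards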
Example 21
Project: recruit   Author: Frank-qlu   File: test_interaction.py    License: Apache License 2.0
def test_trapz_matrix():
    # Test to make sure matrices give the same answer as ndarrays
    # 2018-04-29: moved here from core.tests.test_function_base.
    x = np.linspace(0, 5)
    y = x * x
    r = np.trapz(y, x)
    mx = np.matrix(x)
    my = np.matrix(y)
    mr = np.trapz(my, mx)
    assert_almost_equal(mr, r) 
Example 22
Project: gmpe-smtk   Author: GEMScienceTools   File: intensity_measures.py    License: GNU Affero General Public License v3.0
def get_cav(acceleration, time_step, threshold=0.0):
    """
    Returns the cumulative absolute velocity above a given threshold of
    acceleration
    """
    acceleration = np.fabs(acceleration)
    idx = np.where(acceleration >= threshold)[0]  # take the index array, not the tuple np.where returns
    if len(idx) > 0:
        return np.trapz(acceleration[idx], dx=time_step)
    else:
        return 0.0 
Example 23
Project: gmpe-smtk   Author: GEMScienceTools   File: intensity_measures.py    License: GNU Affero General Public License v3.0
def get_arms(acceleration, time_step):
    """
    Returns the root mean square acceleration, defined as
    sqrt{(1 / duration) * \int{acc ^ 2} dt}
    """
    dur = time_step * float(len(acceleration) - 1)
    return np.sqrt((1. / dur) * np.trapz(acceleration  ** 2., dx=time_step)) 
Example 24
Project: gmpe-smtk   Author: GEMScienceTools   File: intensity_measures.py    License: GNU Affero General Public License v3.0
def get_response_spectrum_intensity(spec):
    """
    Returns the response spectrum intensity (Housner intensity), defined
    as the integral of the pseudo-velocity spectrum between the periods of
    0.1 s and 2.5 s
    :param dict spec:
        Response spectrum of the record as output from :class:
        smtk.response_spectrum.BaseResponseSpectrum
    """
    idx = np.where(np.logical_and(spec["Period"] >= 0.1,
                                  spec["Period"] <= 2.5))[0]
    return np.trapz(spec["Pseudo-Velocity"][idx],
                    spec["Period"][idx]) 
Example 25
Project: gmpe-smtk   Author: GEMScienceTools   File: intensity_measures.py    License: GNU Affero General Public License v3.0
def get_acceleration_spectrum_intensity(spec):
    """
    Returns the acceleration spectrum intensity, defined as the integral
    of the pseudo-acceleration spectrum between the periods of 0.1 and 0.5 s
    """
    idx = np.where(np.logical_and(spec["Period"] >= 0.1,
                                  spec["Period"] <= 0.5))[0]
    return np.trapz(spec["Pseudo-Acceleration"][idx],
                    spec["Period"][idx]) 
Example 26
Project: gmpe-smtk   Author: GEMScienceTools   File: intensity_measures.py    License: GNU Affero General Public License v3.0
def get_quadratic_intensity(acc_x, acc_y, time_step):
    """
    Returns the quadratic intensity of a pair of records, defined as:
    (1. / duration) * \int_0^{duration} a_1(t) a_2(t) dt
    This assumes the time-step of the two records is the same!
    """
    assert len(acc_x) == len(acc_y)
    dur = time_step * float(len(acc_x) - 1)
    return (1. /  dur) * np.trapz(acc_x * acc_y, dx=time_step) 
Example 27
Project: typhon   Author: atmtools   File: arts_lookup.py    License: MIT License
def calc_opacity_from_lookup(lookup, z=None, g=typhon.constants.g,
                             r=typhon.constants.gas_constant_dry_air, tpert=0):
    """Calculate the opacity from an ARTS lookup table.

    Parameters:
        lookup (typhon.arts.catalogues.GasAbsLookup): ARTS lookup table.
        z (ndarray, optional): Altitude profile. If not given, the layer
            thicknesses are calculated based on the hypsometric formula.
        g (float, optional): Gravity constant. Default uses Earth's gravity.
        r (float, optional): Gas constant for dry air. Default uses constant
            for Earth.
        tpert (int, optional): Index of temperature perturbation to plot.

    Returns:
        ndarray: Opacity per species in lookup table.
    """
    speciescount = _calc_lookup_species_count(lookup)
    vmrs = (np.repeat(lookup.referencevmrprofiles, speciescount, axis=0)
            if lookup.nonlinearspecies is not None
            else lookup.referencevmrprofiles)

    ni = (lookup.pressuregrid * vmrs
          / lookup.referencetemperatureprofile / typhon.constants.boltzmann
          ).reshape(np.sum(speciescount), 1, len(lookup.pressuregrid))

    alpha = ni * lookup.absorptioncrosssection[tpert, :, :, :]

    if z is not None:
        z = interp1d(z.grids[0], z.data[:, 0, 0])(lookup.pressuregrid)
    else:
        # Calculate z from hypsometric formula
        pgrid = lookup.pressuregrid
        z = [r * t / g * np.log(p1 / p2)
             for p1, p2, t in zip(pgrid[:-1], pgrid[1:], (
                    lookup.referencetemperatureprofile[
                    1:] + lookup.referencetemperatureprofile[:-1]) / 2.)]
        z = np.cumsum(z)
        p = (pgrid[1:] + pgrid[:-1]) / 2.
        z = interp1d(p, z, fill_value='extrapolate')(lookup.pressuregrid)

    return np.vstack([np.trapz(ialpha, z, axis=1) for ialpha in alpha]) 
Example 28
Project: TFFRCNN   Author: CharlesShang   File: imdb2.py    License: MIT License
def evaluate_recall(self, candidate_boxes, ar_thresh=0.5):
        # Record max overlap value for each gt box
        # Return vector of overlap values
        gt_overlaps = np.zeros(0)
        for i in range(self.num_images):
            gt_inds = np.where(self.roidb[i]['gt_classes'] > 0)[0]
            gt_boxes = self.roidb[i]['boxes'][gt_inds, :]

            boxes = candidate_boxes[i]
            if boxes.shape[0] == 0:
                continue
            overlaps = bbox_overlaps(boxes.astype(float),
                                     gt_boxes.astype(float))

            # gt_overlaps = np.hstack((gt_overlaps, overlaps.max(axis=0)))
            _gt_overlaps = np.zeros((gt_boxes.shape[0]))
            for j in range(gt_boxes.shape[0]):
                argmax_overlaps = overlaps.argmax(axis=0)
                max_overlaps = overlaps.max(axis=0)
                gt_ind = max_overlaps.argmax()
                gt_ovr = max_overlaps.max()
                assert(gt_ovr >= 0)
                box_ind = argmax_overlaps[gt_ind]
                _gt_overlaps[j] = overlaps[box_ind, gt_ind]
                assert(_gt_overlaps[j] == gt_ovr)
                overlaps[box_ind, :] = -1
                overlaps[:, gt_ind] = -1

            gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))

        num_pos = gt_overlaps.size
        gt_overlaps = np.sort(gt_overlaps)
        step = 0.001
        thresholds = np.minimum(np.arange(0.5, 1.0 + step, step), 1.0)
        recalls = np.zeros_like(thresholds)
        for i, t in enumerate(thresholds):
            recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
        ar = 2 * np.trapz(recalls, thresholds)

        return ar, gt_overlaps, recalls, thresholds 
Example 29
Project: mars   Author: mars-project   File: trapz.py    License: Apache License 2.0
def __call__(self, y, x=None):
        inputs = [y]
        order = y.order
        if x is not None:
            x = astensor(x)
            inputs.append(x)
            if x.order == TensorOrder.C_ORDER:
                order = TensorOrder.C_ORDER

        shape = tuple(s for ax, s in enumerate(y.shape)
                      if ax != self._axis)
        dtype = np.trapz(np.empty(1, dtype=y.dtype)).dtype
        return self.new_tensor(inputs, shape=shape, dtype=dtype,
                               order=order) 
Example 30
Project: mars   Author: mars-project   File: trapz.py    License: Apache License 2.0
def execute(cls, ctx, op: "TensorTrapz"):
        inputs, device_id, xp = as_same_device(
            [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)

        y = inputs[0]
        if len(inputs) > 1:
            x = inputs[-1]
        else:
            x = None

        with device(device_id):
            ctx[op.outputs[0].key] = xp.trapz(y, x=x, dx=op.dx,
                                              axis=op.axis)