Python xarray.apply_ufunc() Examples

The following are 30 code examples of xarray.apply_ufunc(), drawn from open-source projects. Each example notes its source file, project, and license. You may also want to check out the other available functions and classes of the xarray module.
Example #1
Source File: initialization.py    From minian with GNU General Public License v3.0
def ks_refine(varr, seeds, sig=0.05):
    print("selecting seeds")
    varr_sub = varr.sel(
        spatial=[tuple(hw) for hw in seeds[['height', 'width']].values])
    print("performing KS test")
    ks = xr.apply_ufunc(
        lambda x: kstest(zscore(x), 'norm')[1],
        varr_sub.chunk(dict(frame=-1, spatial='auto')),
        input_core_dims=[['frame']],
        vectorize=True,
        dask='parallelized',
        output_dtypes=[float])
    mask = ks < sig
    mask_df = mask.to_pandas().rename('mask_ks').reset_index()
    seeds = pd.merge(seeds, mask_df, on=['height', 'width'], how='left')
    return seeds 
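As a quick illustration, a hypothetical call to ks_refine might look like the sketch below. The data construction is an assumption for demonstration only; the snippet supplies the imports the function relies on (xr for xarray, pd for pandas, and kstest/zscore from scipy.stats).

import numpy as np
import pandas as pd
import xarray as xr
from scipy.stats import kstest, zscore

# Build a small movie with a stacked 'spatial' MultiIndex, which is the
# layout ks_refine expects when selecting seeds by (height, width) tuples.
varr = xr.DataArray(
    np.random.randn(100, 4, 4),
    dims=['frame', 'height', 'width'],
    coords=dict(frame=range(100), height=range(4), width=range(4)),
).stack(spatial=['height', 'width'])

seeds = pd.DataFrame({'height': [0, 1], 'width': [2, 3]})
seeds = ks_refine(varr, seeds, sig=0.05)
print(seeds['mask_ks'])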
Example #2
Source File: deterministic.py    From xskillscore with Apache License 2.0
def _determine_input_core_dims(dim, weights):
    """
    Determine input_core_dims based on type of dim and weights.

    Parameters
    ----------
    dim : str, list
        The dimension(s) to apply the correlation along.
    weights : xarray.Dataset or xarray.DataArray or None
        Weights matching dimensions of ``dim`` to apply during the function.

    Returns
    -------
    list of lists
        input_core_dims used for xr.apply_ufunc.
    """
    if not isinstance(dim, list):
        dim = [dim]
    # build input_core_dims depending on weights
    if weights is None:
        input_core_dims = [dim, dim, [None]]
    else:
        input_core_dims = [dim, dim, dim]
    return input_core_dims 
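The two branches are easiest to see with concrete (hypothetical) arguments:

# weights is None: the weights slot gets a placeholder core dim
_determine_input_core_dims("time", None)        # -> [['time'], ['time'], [None]]
# weights given (any non-None DataArray): all three share the same core dims
_determine_input_core_dims(["x", "y"], weights)  # -> [['x', 'y'], ['x', 'y'], ['x', 'y']]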
Example #3
Source File: run_length.py    From xclim with Apache License 2.0
def windowed_run_events_ufunc(x: Sequence[bool], window: int) -> xr.DataArray:
    """Dask-parallel version of windowed_run_events_1d, i.e. the number of runs at least as long as the given duration.

    Parameters
    ----------
    x : Sequence[bool]
      Input array (bool)
    window : int
      Minimum run length

    Returns
    -------
    out : xr.DataArray
      The number of runs at least as long as ``window``, evaluated lazily
      along the time dimension of the dask array.
    """
    return xr.apply_ufunc(
        windowed_run_events_1d,
        x,
        input_core_dims=[["time"]],
        vectorize=True,
        dask="parallelized",
        output_dtypes=[int],
        keep_attrs=True,
        kwargs={"window": window},
    ) 
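windowed_run_events_1d is defined elsewhere in run_length.py. A minimal NumPy sketch of a compatible 1-D kernel (an assumption for illustration, not xclim's actual implementation) could be:

import numpy as np

def windowed_run_events_1d(x, window):
    # Pad with False so every run of True values has a start and an end.
    xb = np.concatenate([[0], np.asarray(x, dtype=int), [0]])
    d = np.diff(xb)
    starts = np.flatnonzero(d == 1)
    ends = np.flatnonzero(d == -1)
    # Count the runs whose length reaches the minimum window.
    return int(np.sum((ends - starts) >= window))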
Example #4
Source File: run_length.py    From xclim with Apache License 2.0
def longest_run_ufunc(x: Sequence[bool]) -> xr.DataArray:
    """Dask-parallel version of longest_run_1d, i.e. the maximum number of consecutive true values in
    an array.

    Parameters
    ----------
    x : Sequence[bool]
      Input array (bool)

    Returns
    -------
    out : xr.DataArray
      The length of the longest run, evaluated lazily along the time
      dimension of the dask array.
    """
    return xr.apply_ufunc(
        longest_run_1d,
        x,
        input_core_dims=[["time"]],
        vectorize=True,
        dask="parallelized",
        output_dtypes=[int],
        keep_attrs=True,
    ) 
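Similarly, a minimal sketch of a compatible longest_run_1d (again an assumption, not xclim's code):

import numpy as np

def longest_run_1d(x):
    # Lengths of all runs of consecutive True values, via run boundaries.
    xb = np.concatenate([[0], np.asarray(x, dtype=int), [0]])
    d = np.diff(xb)
    runs = np.flatnonzero(d == -1) - np.flatnonzero(d == 1)
    return int(runs.max()) if runs.size else 0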
Example #5
Source File: cnmf.py    From minian with GNU General Public License v3.0
def psd_fft(varr):
    _T = len(varr.coords['frame'])
    ns = _T // 2 + 1
    if _T % 2 == 0:
        freq_crd = np.linspace(0, 0.5, ns)
    else:
        freq_crd = np.linspace(0, 0.5 * (_T - 1) / _T, ns)
    print("computing psd of input")
    varr_fft = xr.apply_ufunc(
        fftw.rfft,
        varr.chunk(dict(frame=-1)),
        input_core_dims=[['frame']],
        output_core_dims=[['freq']],
        dask='allowed',
        output_sizes=dict(freq=ns),
        output_dtypes=[np.complex128])
    varr_fft = varr_fft.assign_coords(freq=freq_crd)
    varr_psd = 1 / _T * np.abs(varr_fft)**2
    return varr_psd 
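The even/odd branches above reproduce NumPy's one-sided FFT frequency grid for unit sample spacing, which can be checked directly (a side note, not part of the source):

import numpy as np

for _T in (6, 7):
    ns = _T // 2 + 1
    if _T % 2 == 0:
        freq_crd = np.linspace(0, 0.5, ns)
    else:
        freq_crd = np.linspace(0, 0.5 * (_T - 1) / _T, ns)
    # Matches the frequencies rfft assigns to its output bins.
    assert np.allclose(freq_crd, np.fft.rfftfreq(_T))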
Example #6
Source File: cnmf.py    From minian with GNU General Public License v3.0
def psd_welch(varr):
    _T = len(varr.coords['frame'])
    ns = _T // 2 + 1
    if _T % 2 == 0:
        freq_crd = np.linspace(0, 0.5, ns)
    else:
        freq_crd = np.linspace(0, 0.5 * (_T - 1) / _T, ns)
    varr_psd = xr.apply_ufunc(
            _welch,
            varr.chunk(dict(frame=-1)),
            input_core_dims=[['frame']],
            output_core_dims=[['freq']],
            dask='parallelized',
            vectorize=True,
            kwargs=dict(nperseg=_T),
            output_sizes=dict(freq=ns),
            output_dtypes=[varr.dtype])
    varr_psd = varr_psd.assign_coords(freq=freq_crd)
    return varr_psd 
Example #7
Source File: cnmf.py    From minian with GNU General Public License v3.0
def get_noise_welch(varr,
                    noise_range=(0.25, 0.5),
                    noise_method='logmexp',
                    compute=True):
    print("estimating noise")
    sn = xr.apply_ufunc(
        noise_welch,
        varr.chunk(dict(frame=-1)),
        input_core_dims=[['frame']],
        dask='parallelized',
        vectorize=True,
        kwargs=dict(noise_range=noise_range, noise_method=noise_method),
        output_dtypes=[varr.dtype])
    if compute:
        sn = sn.compute()
    return sn 
Example #8
Source File: vertical_coordinates.py    From xarrayutils with MIT License
def linear_interpolation_remap(
    z, data, z_regridded, z_dim=None, z_regridded_dim="regridded", output_dim="remapped"
):
    # infer dim from input
    if z_dim is None:
        if len(z.dims) != 1:
            raise RuntimeError(
                "if z_dim is not specified, z must be a 1D array."
            )
        dim = z.dims[0]
    else:
        dim = z_dim

    # if a dataset is passed, drop all data_vars that don't contain dim
    if isinstance(data, xr.Dataset):
        raise ValueError("Dataset input is not supported yet")
        # TODO: for a dataset input, just apply the function to each appropriate array

    kwargs = dict(
        input_core_dims=[[dim], [dim], [z_regridded_dim]],
        output_core_dims=[[output_dim]],
        vectorize=True,
        dask="parallelized",
        output_dtypes=[data.dtype],
        output_sizes={output_dim: len(z_regridded[z_regridded_dim])},
    )
    remapped = xr.apply_ufunc(_regular_interp, z, data, z_regridded, **kwargs)

    remapped.coords[output_dim] = z_regridded.rename(
        {z_regridded_dim: output_dim}
    ).coords[output_dim]
    return remapped 
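_regular_interp is defined elsewhere in vertical_coordinates.py. A plausible 1-D sketch built on np.interp (an assumption about its behavior, not the library's actual code):

import numpy as np

def _regular_interp(z, data, z_regridded):
    # Interpolate one profile onto the target levels, ignoring NaNs and
    # leaving target levels outside the source range as NaN.
    valid = ~np.isnan(data) & ~np.isnan(z)
    if not valid.any():
        return np.full(len(z_regridded), np.nan)
    return np.interp(z_regridded, z[valid], data[valid],
                     left=np.nan, right=np.nan)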
Example #9
Source File: longitude.py    From aospy with Apache License 2.0
def __lt__(self, other):
        if isinstance(other, Longitude):
            if self.hemisphere == 'W':
                if other.hemisphere == 'E':
                    return True
                else:
                    return self.longitude > other.longitude
            else:
                if other.hemisphere == 'W':
                    return False
                else:
                    return self.longitude < other.longitude
        else:
            return xr.apply_ufunc(np.greater, other, self) 
Example #10
Source File: vertical_remapping.py    From xarrayutils with MIT License
def _groupby_vert(data, group_data, bins):
    # Replicates the behaviour of xarray's `groupby_bins` along one dimension
    # with numpy
    axis = -1  # xr.apply_ufunc transposes core dims to the end
    layers = []

    for b in range(len(bins) - 1):
        bb = [bins[b], bins[b + 1]]
        # This should be customizable like in groupby
        mask = np.logical_and(bb[0] < group_data, bb[1] >= group_data)
        data_masked = data.copy()
        data_masked[~mask] = np.nan
        nanmask = np.all(~mask, axis=axis)
        # There were some problems with passing the function as func=...
        # kwarg. So for now I will hardcode the solution
        # layer = func(data_masked, axis=axis)
        layer = np.nansum(data_masked, axis=axis)
        # there might be an exception when this is run on a 1d vector...
        # but for now this works...
        # I formerly did this with ma.masked_array,
        # but somehow that did not work with apply_ufunc

        # special treatment for 1d input arrays
        if not isinstance(layer, np.ndarray):
            layer = np.array(layer)

        layer[nanmask] = np.nan
        layers.append(layer)
    return np.stack(layers, axis=-1) 
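A hypothetical standalone call, summing data into two vertical bins along the last axis (the axis apply_ufunc moves core dims to):

import numpy as np

data = np.arange(8.0).reshape(2, 4)           # shape (x, z)
group_data = np.tile(np.arange(4.0), (2, 1))  # depth-like coordinate
bins = [-0.5, 1.5, 3.5]                       # edges of two bins
print(_groupby_vert(data, group_data, bins))  # shape (2, 2): [[1., 5.], [9., 13.]]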
Example #11
Source File: filtering.py    From xarrayutils with MIT License
def filter_1D(data, std, dim="time", dtype=None):
    if astropy is None:
        raise RuntimeError(
            "Module `astropy` not found. Please install the optional dependency with `conda install -c conda-forge astropy`."
        )

    if dtype is None:
        dtype = detect_dtype(data)

    kernel = Gaussian1DKernel(std)

    def smooth_raw(data):
        raw_data = getattr(data, "values", data)
        result = convolve_fft(raw_data, kernel, boundary="wrap")
        result[np.isnan(raw_data)] = np.nan
        return result

    def temporal_smoother(data):
        dims = [dim]
        return xr.apply_ufunc(
            smooth_raw,
            data,
            vectorize=True,
            dask="parallelized",
            input_core_dims=[dims],
            output_core_dims=[dims],
            output_dtypes=[dtype],
        )

    return temporal_smoother(data) 
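A hypothetical usage sketch (requires the optional astropy dependency; dtype is passed explicitly because detect_dtype is defined elsewhere in the module):

import numpy as np
import xarray as xr

da_in = xr.DataArray(np.random.rand(120), dims=['time'])
smoothed = filter_1D(da_in, std=3, dim='time', dtype=np.float64)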
Example #12
Source File: filtering.py    From xarrayutils with MIT License
def filter_2D(data, std, dim, dtype=None):
    if astropy is None:
        raise RuntimeError(
            "Module `astropy` not found. Please install the optional dependency with `conda install -c conda-forge astropy`."
        )

    if dtype is None:
        dtype = detect_dtype(data)

    kernel = Gaussian2DKernel(std)

    def smooth_raw(data):
        raw_data = getattr(data, "values", data)
        result = convolve_fft(raw_data, kernel, boundary="wrap")
        result[np.isnan(raw_data)] = np.nan
        return result

    def smoother(data):
        dims = dim
        # this is different from the 1d case

        return xr.apply_ufunc(
            smooth_raw,
            data,
            vectorize=True,
            dask="parallelized",
            input_core_dims=[dims],
            output_core_dims=[dims],
            output_dtypes=[dtype],
        )

    return smoother(data)


# TODO spatial filter 
Example #13
Source File: run_length.py    From xclim with Apache License 2.0
def windowed_run_count_ufunc(x: Sequence[bool], window: int) -> xr.DataArray:
    """Dask-parallel version of windowed_run_count_1d, i.e. the number of consecutive true values in
    an array for runs at least as long as the given duration.

    Parameters
    ----------
    x : Sequence[bool]
      Input array (bool)
    window : int
      Minimum duration of consecutive run to accumulate values.

    Returns
    -------
    out : xr.DataArray
      The accumulated count, evaluated lazily along the time dimension of
      the dask array.
    """
    return xr.apply_ufunc(
        windowed_run_count_1d,
        x,
        input_core_dims=[["time"]],
        vectorize=True,
        dask="parallelized",
        output_dtypes=[int],
        keep_attrs=True,
        kwargs={"window": window},
    ) 
Example #14
Source File: cnmf.py    From minian with GNU General Public License v3.0
def get_noise_fft(varr, noise_range=(0.25, 0.5), noise_method='logmexp'):
    sn = xr.apply_ufunc(
        _noise_fft,
        varr.chunk(dict(frame=-1)),
        input_core_dims=[['frame']],
        output_core_dims=[[]],
        dask='parallelized',
        vectorize=True,
        kwargs=dict(
            noise_range=noise_range,
            noise_method=noise_method),
        output_dtypes=[float]
    )
    return sn 
Example #15
Source File: motion_correction.py    From minian with GNU General Public License v3.0
def mask_shifts(varr_fft, corr, shifts, z_thres, perframe=True, pad_f=1):
    dims = list(varr_fft.dims)
    dims.remove('frame')
    sizes = [varr_fft.sizes[d] for d in dims]
    pad_s = np.array(sizes) * pad_f
    pad_s = pad_s.astype(int)
    mask = xr.apply_ufunc(zscore, corr.fillna(0)) > z_thres
    shifts = shifts.where(mask)
    if perframe:
        mask_diff = xr.DataArray(
            np.diff(mask.astype(int)),
            coords=dict(frame=mask.coords['frame'][1:]),
            dims=['frame'])
        good_idx = mask.coords['frame'].where(mask > 0, drop=True)
        bad_idx = mask_diff.coords['frame'].where(mask_diff == -1, drop=True)
        for cur_bad in bad_idx:
            gb_diff = good_idx - cur_bad
            try:
                next_good = gb_diff[gb_diff > 0].min() + cur_bad
                last_good = gb_diff[gb_diff < 0].max() + cur_bad
                cur_src = varr_fft.sel(frame=last_good)
                cur_dst = varr_fft.sel(frame=next_good)
                res = shift_fft(cur_src, cur_dst, pad_s, pad_f)
                shifts.loc[dict(frame=next_good.values)] = res[0:2]
            except (KeyError, ValueError):
                print("unable to correct for bad frame: {}".format(int(cur_bad)))
    return shifts, mask 
Example #16
Source File: motion_correction.py    From minian with GNU General Public License v3.0
def apply_shifts(varr, shifts):
    sh_dim = shifts.coords['variable'].values.tolist()
    varr_sh = xr.apply_ufunc(
        shift_perframe,
        varr.chunk({d: -1 for d in sh_dim}),
        shifts,
        input_core_dims=[sh_dim, ['variable']],
        output_core_dims=[sh_dim],
        vectorize=True,
        dask='parallelized',
        output_dtypes=[varr.dtype])
    return varr_sh 
Example #17
Source File: preprocessing.py    From minian with GNU General Public License v3.0
def detect_brightspot(varray, thres=None, window=50, step=10):
    print("detecting brightspot")
    spots = xr.DataArray(
        varray.sel(frame=0)).reset_coords(drop=True).astype(int)
    spots.values = np.zeros_like(spots.values)
    meanfm = varray.mean(dim='frame')
    for ih, ph in meanfm.rolling(height=window):
        if ih % step == 0:
            for iw, pw in ph.rolling(width=window):
                if (iw % step == 0 and pw.sizes['height'] == window
                        and pw.sizes['width'] == window):
                    mean_z = xr.apply_ufunc(zscore, pw)
                    if not thres:
                        cur_thres = -mean_z.min().values
                    else:
                        cur_thres = thres
                    spots.loc[{
                        'height': slice(ih - window + 1, ih),
                        'width': slice(iw - window + 1, iw)
                    }] += mean_z > cur_thres
                    print(
                        ("processing window at {:3d}, {:3d}"
                         " using threshold: {:03.2f}").format(
                             int(ih), int(iw), float(cur_thres)),
                        end='\r')
    print("\nbrightspot detection done")
    return spots 
Example #18
Source File: preprocessing.py    From minian with GNU General Public License v3.0
def remove_background(varr, method, wnd):
    selem = disk(wnd)
    res = xr.apply_ufunc(
        remove_background_perframe,
        varr.chunk(dict(height=-1, width=-1)),
        input_core_dims=[['height', 'width']],
        output_core_dims=[['height', 'width']],
        vectorize=True,
        dask='parallelized',
        output_dtypes=[varr.dtype],
        kwargs=dict(method=method, wnd=wnd, selem=selem))
    return res.rename(varr.name + "_subtracted") 
Example #19
Source File: preprocessing.py    From minian with GNU General Public License v3.0
def gradient_norm(varr):
    return xr.apply_ufunc(
        gradient_norm_perframe,
        varr,
        input_core_dims=[['height', 'width']],
        output_core_dims=[['height', 'width']],
        vectorize=True,
        dask='parallelized',
        output_dtypes=[varr.dtype]).rename(varr.name + '_gradient') 
Example #20
Source File: preprocessing.py    From minian with GNU General Public License v3.0
def remove_brightspot(varr, thres=3):
    k_mean = ski.morphology.diamond(1)
    k_mean[1, 1] = 0
    k_mean = k_mean / 4
    return xr.apply_ufunc(
        remove_brightspot_perframe,
        varr.chunk(dict(height=-1, width=-1)),
        input_core_dims=[['height', 'width']],
        output_core_dims=[['height', 'width']],
        vectorize=True,
        dask='parallelized',
        kwargs=dict(k_mean=k_mean, thres=thres),
        output_dtypes=[varr.dtype]).rename(varr.name + '_clean') 
Example #21
Source File: visualization.py    From minian with GNU General Public License v3.0
def update_AC(self, usub=None):
        if usub is None:
            usub = self.strm_usub.usub
        if usub:
            if self._useAC:
                umask = ((self.A_sub.sel(unit_id=usub) > 0)
                         .any('unit_id'))
                A_sub = (self.A_sub.sel(unit_id=usub)
                         .where(umask, drop=True).fillna(0))
                C_sub = self.C_sub.sel(unit_id=usub)
                AC = xr.apply_ufunc(
                    da.dot,
                    A_sub, C_sub,
                    input_core_dims=[['height', 'width', 'unit_id'], ['unit_id', 'frame']],
                    output_core_dims=[['height', 'width', 'frame']],
                    dask='allowed')
                self._AC = AC.compute()
                wndh, wndw = AC.coords['height'].values, AC.coords['width'].values
                window = self.A_sub.sel(
                    height=slice(wndh.min(), wndh.max()),
                    width=slice(wndw.min(), wndw.max()))
                self._AC = self._AC.reindex_like(window).fillna(0)
                self._mov = (self.org_sub.reindex_like(window)).compute()
            else:
                self._AC = self.A_sub.sel(unit_id=usub).sum('unit_id')
                self._mov = self.org_sub
            self.strm_f.event(x=0)
        else:
            self._AC = xr.DataArray([])
            self._mov = xr.DataArray([])
            self.strm_f.event(x=0) 
Example #22
Source File: visualization.py    From minian with GNU General Public License v3.0
def centroid(A, verbose=False):
    def rel_cent(im):
        im_nan = np.isnan(im)
        if im_nan.all():
            return np.array([np.nan, np.nan])
        if im_nan.any():
            im = np.nan_to_num(im)
        cent = np.array(center_of_mass(im))
        return cent / im.shape
    gu_rel_cent = da.gufunc(
        rel_cent,
        signature='(h,w)->(d)',
        output_dtypes=float,
        output_sizes=dict(d=2),
        vectorize=True
    )
    cents = (xr.apply_ufunc(
        gu_rel_cent, A.chunk(dict(height=-1, width=-1)),
        input_core_dims=[['height', 'width']],
        output_core_dims=[['dim']],
        dask='allowed')
             .assign_coords(dim=['height', 'width']))
    if verbose:
        print("computing centroids")
        with ProgressBar():
            cents = cents.compute()
    cents_df = (cents.rename('cents').to_series().dropna()
                .unstack('dim').rename_axis(None, axis='columns')
                .reset_index())
    h_rg = (A.coords['height'].min().values, A.coords['height'].max().values)
    w_rg = (A.coords['width'].min().values, A.coords['width'].max().values)
    cents_df['height'] = cents_df['height'] * (h_rg[1] - h_rg[0]) + h_rg[0]
    cents_df['width'] = cents_df['width'] * (w_rg[1] - w_rg[0]) + w_rg[0]
    return cents_df 
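A hypothetical usage sketch with two synthetic footprints (assumes the imports centroid relies on: dask.array as da, dask.diagnostics.ProgressBar, and scipy.ndimage's center_of_mass):

import numpy as np
import xarray as xr

A = xr.DataArray(
    np.random.rand(2, 5, 5),
    dims=['unit_id', 'height', 'width'],
    coords=dict(unit_id=[0, 1], height=range(5), width=range(5)),
)
cents_df = centroid(A, verbose=True)  # columns: unit_id, height, width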
Example #23
Source File: initialization.py    From minian with GNU General Public License v3.0
def seeds_init(varr, wnd_size=500, method='rolling', stp_size=200, nchunk=100, max_wnd=10, diff_thres=2):
    print("constructing chunks")
    idx_fm = varr.coords['frame']
    nfm = len(idx_fm)
    if method == 'rolling':
        nstp = int(np.ceil(nfm / stp_size)) + 1
        centers = np.linspace(0, nfm - 1, nstp)
        hwnd = np.ceil(wnd_size / 2)
        max_idx = list(
            map(lambda c: slice(int(np.floor(c - hwnd).clip(0)), int(np.ceil(c + hwnd))),
                centers))
    elif method == 'random':
        max_idx = [
            np.random.randint(0, nfm - 1, wnd_size) for _ in range(nchunk)
        ]
    print("creating parallel scheme")
    res = [max_proj_frame(varr, cur_idx) for cur_idx in max_idx]
    max_res = xr.concat(res, 'sample').chunk(dict(sample=10))
    print("computing max projections")
    max_res = max_res.persist()
    print("calculating local maximum")
    loc_max = xr.apply_ufunc(
        local_max_roll,
        max_res.chunk(dict(height=-1, width=-1)),
        input_core_dims=[['height', 'width']],
        output_core_dims=[['height', 'width']],
        vectorize=True,
        dask='parallelized',
        output_dtypes=[np.uint8],
        kwargs=dict(k0=2, k1=max_wnd, diff=diff_thres)).sum('sample')
    loc_max = loc_max.compute()
    loc_max_flt = loc_max.stack(spatial=['height', 'width'])
    seeds = (loc_max_flt.where(loc_max_flt > 0, drop=True)
             .rename('seeds').to_dataframe().reset_index())
    return seeds[['height', 'width', 'seeds']].reset_index() 
Example #24
Source File: initialization.py    From minian with GNU General Public License v3.0
def gmm_refine(varr, seeds, q=(0.1, 99.9), n_components=2, valid_components=1, mean_mask=True):
    print("selecting seeds")
    varr_sub = varr.sel(
        spatial=[tuple(hw) for hw in seeds[['height', 'width']].values])
    print("computing peak-valley values")
    varr_valley = xr.apply_ufunc(
        np.percentile,
        varr_sub.chunk(dict(frame=-1)),
        input_core_dims=[['frame']],
        kwargs=dict(q=q[0], axis=-1),
        dask='parallelized',
        output_dtypes=[varr_sub.dtype])
    varr_peak = xr.apply_ufunc(
        np.percentile,
        varr_sub.chunk(dict(frame=-1)),
        input_core_dims=[['frame']],
        kwargs=dict(q=q[1], axis=-1),
        dask='parallelized',
        output_dtypes=[varr_sub.dtype])
    varr_pv = varr_peak - varr_valley
    varr_pv = varr_pv.compute()
    print("fitting GMM models")
    dat = varr_pv.values.reshape(-1, 1)
    gmm = GaussianMixture(n_components=n_components)
    gmm.fit(dat)
    idg = np.argsort(gmm.means_.reshape(-1))[-valid_components:]
    idx_valid = np.isin(gmm.predict(dat), idg)
    if mean_mask:
        idx_mean = dat > np.sort(gmm.means_)[0]
        idx_valid = np.logical_and(idx_mean.squeeze(), idx_valid)
    seeds['mask_gmm'] = idx_valid
    return seeds, varr_pv, gmm 
Example #25
Source File: stats_utils.py    From arviz with Apache License 2.0
def update_docstring(ufunc, func, n_output=1):
    """Update ArviZ generated ufunc docstring."""
    module = ""
    name = ""
    docstring = ""
    if hasattr(func, "__module__") and isinstance(func.__module__, str):
        module += func.__module__
    if hasattr(func, "__name__"):
        name += func.__name__
    if hasattr(func, "__doc__") and isinstance(func.__doc__, str):
        docstring += func.__doc__
    ufunc.__doc__ += "\n\n"
    if module or name:
        ufunc.__doc__ += "This function is a ufunc wrapper for "
        ufunc.__doc__ += module + "." + name
        ufunc.__doc__ += "\n"
    ufunc.__doc__ += 'Call ufunc with n_args from xarray against "chain" and "draw" dimensions:'
    ufunc.__doc__ += "\n\n"
    input_core_dims = 'tuple(("chain", "draw") for _ in range(n_args))'
    if n_output > 1:
        output_core_dims = " tuple([] for _ in range({}))".format(n_output)
        msg = "xr.apply_ufunc(ufunc, dataset, input_core_dims={}, output_core_dims={})"
        ufunc.__doc__ += msg.format(input_core_dims, output_core_dims)
    else:
        output_core_dims = ""
        msg = "xr.apply_ufunc(ufunc, dataset, input_core_dims={})"
        ufunc.__doc__ += msg.format(input_core_dims)
    ufunc.__doc__ += "\n\n"
    ufunc.__doc__ += "For example: np.std(data, ddof=1) --> n_args=2"
    if docstring:
        ufunc.__doc__ += "\n\n"
        ufunc.__doc__ += module + "." + name
        ufunc.__doc__ += " docstring:"
        ufunc.__doc__ += "\n\n"
        ufunc.__doc__ += docstring 
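A quick (hypothetical) demonstration, documenting a wrapper around np.std:

import numpy as np

def _std_ufunc(ary):
    return np.std(ary, ddof=1)

# __doc__ must be a string before update_docstring appends to it.
_std_ufunc.__doc__ = "Sample standard deviation over chain and draw."
update_docstring(_std_ufunc, np.std, n_output=1)
print(_std_ufunc.__doc__)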
Example #26
Source File: longitude.py    From aospy with Apache License 2.0
def __eq__(self, other):
        if isinstance(other, Longitude):
            return (self.hemisphere == other.hemisphere and
                    self.longitude == other.longitude)
        else:
            return xr.apply_ufunc(np.equal, other, self) 
Example #27
Source File: longitude.py    From aospy with Apache License 2.0
def __gt__(self, other):
        if isinstance(other, Longitude):
            if self.hemisphere == 'W':
                if other.hemisphere == 'E':
                    return False
                else:
                    return self.longitude < other.longitude
            else:
                if other.hemisphere == 'W':
                    return True
                else:
                    return self.longitude > other.longitude
        else:
            return xr.apply_ufunc(np.less, other, self) 
Example #28
Source File: longitude.py    From aospy with Apache License 2.0
def __le__(self, other):
        if isinstance(other, Longitude):
            return self < other or self == other
        else:
            return xr.apply_ufunc(np.greater_equal, other, self) 
Example #29
Source File: longitude.py    From aospy with Apache License 2.0
def __ge__(self, other):
        if isinstance(other, Longitude):
            return self > other or self == other
        else:
            return xr.apply_ufunc(np.less_equal, other, self) 
Example #30
Source File: probabilistic.py    From xskillscore with Apache License 2.0
def xr_brier_score(observations, forecasts):
    """
    xarray version of properscoring.brier_score: Calculate Brier score (BS).
    .. math::
        BS(p, k) = (p_1 - k)^2
    Parameters
    ----------
    observations : xarray.Dataset or xarray.DataArray
        The observations or set of observations.
    forecasts : xarray.Dataset or xarray.DataArray
        The forecasts associated with the observations.
    Returns
    -------
    xarray.Dataset or xarray.DataArray
    References
    ----------
    Gneiting, Tilmann, and Adrian E Raftery. “Strictly Proper Scoring Rules,
      Prediction, and Estimation.” Journal of the American Statistical
      Association 102, no. 477 (March 1, 2007): 359–78.
      https://doi.org/10/c6758w.
    See Also
    --------
    properscoring.brier_score
    xarray.apply_ufunc
    """
    return xr.apply_ufunc(
        brier_score,
        observations,
        forecasts,
        input_core_dims=[[], []],
        dask='parallelized',
        output_dtypes=[float],
    )
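A hypothetical usage sketch; brier_score here is properscoring.brier_score, imported elsewhere in probabilistic.py:

import numpy as np
import xarray as xr

# Binary observations and probabilistic forecasts on a shared dimension.
obs = xr.DataArray((np.random.rand(100) > 0.5).astype(float), dims=['time'])
fct = xr.DataArray(np.random.rand(100), dims=['time'])
bs = xr_brier_score(obs, fct)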