Python numpy.ptp() Examples

The following code examples show how to use numpy.ptp(), which returns the range of values (maximum minus minimum) in an array, optionally along an axis. The examples are extracted from open source projects; each one is preceded by a reference to the project, author, source file, and license it comes from.

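Before the project examples, here is a minimal standalone sketch of what numpy.ptp() computes: the peak-to-peak range (maximum minus minimum), either over the flattened array or along a given axis.

import numpy as np

a = np.array([[4, 9, 2],
              [8, 1, 6]])
print(np.ptp(a))          # 8 -> 9 - 1 over the flattened array
print(np.ptp(a, axis=0))  # [4 8 4] -> per-column range
print(np.ptp(a, axis=1))  # [7 7] -> per-row range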

Example 1
Project: pyLucid   Author: yelantingfeng   File: lucidDream.py   License: MIT License
def spline_transform_multi(img, mask):
    bimask = mask > 0
    M, N = np.where(bimask)
    # Bounding-box size of the mask: peak-to-peak of the row/column indices.
    w = np.ptp(N) + 1
    h = np.ptp(M) + 1
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # Boundary pixels: the dilated mask minus the mask itself.
    bound = cv2.dilate(bimask.astype('uint8'), kernel) - bimask
    y, x = np.where(bound > 0)

    if x.size > 4:
        newxy = thin_plate_transform(x, y, w, h, mask.shape[:2], num_points=5)

        new_img = cv2.remap(img, newxy, None, cv2.INTER_LINEAR)
        new_msk = cv2.remap(mask, newxy, None, cv2.INTER_NEAREST)
    else:
        # Too few boundary points for a thin-plate transform; return the inputs
        # unchanged. (The original `elif x.size > 0` branch left new_img and
        # new_msk undefined for an empty mask, raising UnboundLocalError.)
        new_img = img
        new_msk = mask
    return new_img, new_msk
Example 2
Project: ratcave   Author: ratcave   File: collision.py   License: MIT License
def __init__(self, parent=None, visible=True, drawmode=gl.GL_LINES, position=(0, 0, 0), **kwargs):
        """Calculates collision by checking if a point is inside a sphere around the mesh vertices."""
        # kwargs['scale'] = np.ptp(parent.vertices, axis=0).max() / 2 if 'scale' not in kwargs else kwargs['scale']
        # kwargs['']

        from .wavefront import WavefrontReader
        from .resources import obj_primitives
        reader = WavefrontReader(obj_primitives)
        body = reader.bodies[self.primitive_shape]
        vertices, normals, texcoords = body['v'], body['vn'], body['vt']

        super(ColliderBase, self).__init__(arrays=[vertices, normals, texcoords],
                                           drawmode=drawmode, visible=visible, position=position,
                                           **kwargs)
        self.uniforms['diffuse'] = 1., 0, 0

        # Changes Scenegraph.parent execution order so self.scale can occur in the CollisionChecker parent property.
        if parent:
            self.parent = parent 
Example 3
Project: recruit   Author: Frank-qlu   File: histograms.py   License: Apache License 2.0
def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the data size. Used by many
    programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / np.sqrt(x.size) 
Example 4
Project: recruit   Author: Frank-qlu   File: histograms.py   License: Apache License 2.0
def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (np.log2(x.size) + 1.0) 
Example 5
Project: recruit   Author: Frank-qlu   File: histograms.py   License: Apache License 2.0
def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (2.0 * x.size ** (1.0 / 3)) 
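The three estimators above all divide the data's peak-to-peak range by a term that grows with the sample size, so the implied number of bins is roughly np.ptp(x) / h. A small sketch comparing them on the same data, with the formulas copied from the functions above:

import numpy as np

x = np.random.default_rng(0).normal(size=1000)
for name, h in [('sqrt',    np.ptp(x) / np.sqrt(x.size)),
                ('sturges', np.ptp(x) / (np.log2(x.size) + 1.0)),
                ('rice',    np.ptp(x) / (2.0 * x.size ** (1.0 / 3)))]:
    print(name, round(h, 3), '->', int(np.ceil(np.ptp(x) / h)), 'bins')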
Example 6
Project: mars   Author: mars-project   File: test_statistics_execute.py   License: Apache License 2.0
def testPtpExecution(self):
        x = arange(4, chunk_size=1).reshape(2, 2)

        t = ptp(x, axis=0)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.ptp(np.arange(4).reshape(2, 2), axis=0)
        np.testing.assert_equal(res, expected)

        t = ptp(x, axis=1)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.ptp(np.arange(4).reshape(2, 2), axis=1)
        np.testing.assert_equal(res, expected)

        t = ptp(x)

        res = self.executor.execute_tensor(t)[0]
        expected = np.ptp(np.arange(4).reshape(2, 2))
        np.testing.assert_equal(res, expected) 
Example 7
Project: kits19.MIScnn   Author: muellerdo   File: visualizer.py   License: GNU General Public License v3.0
def overlay_segmentation(vol, seg):
    # Scale volume to greyscale range
    vol_greyscale = (255*(vol - np.min(vol))/np.ptp(vol)).astype(int)
    # Convert volume to RGB
    vol_rgb = np.stack([vol_greyscale, vol_greyscale, vol_greyscale], axis=-1)
    # Initialize segmentation in RGB
    shp = seg.shape
    seg_rgb = np.zeros((shp[0], shp[1], shp[2], 3), dtype=int)  # np.int was removed in NumPy 1.24; use the builtin int
    # Set class to appropriate color
    seg_rgb[np.equal(seg, 1)] = [255, 0,   0]
    seg_rgb[np.equal(seg, 2)] = [0,   0, 255]
    # Get binary array for places where an ROI lives
    segbin = np.greater(seg, 0)
    repeated_segbin = np.stack((segbin, segbin, segbin), axis=-1)
    # Weighted sum where there's a value to overlay
    alpha = 0.3
    vol_overlayed = np.where(
        repeated_segbin,
        np.round(alpha*seg_rgb+(1-alpha)*vol_rgb).astype(np.uint8),
        np.round(vol_rgb).astype(np.uint8)
    )
    # Return final volume with segmentation overlay
    return vol_overlayed 
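The expression 255*(vol - np.min(vol))/np.ptp(vol) above is the standard min-max normalization idiom, with np.ptp() supplying the max - min denominator in a single call. A standalone sketch of the same idiom; note the original divides by zero for a constant input, so a guard is added here:

import numpy as np

def to_grayscale_255(a):
    # Rescale any array to the integer range [0, 255] via its peak-to-peak range.
    rng = np.ptp(a)
    if rng == 0:
        return np.zeros(a.shape, dtype=np.uint8)  # constant input: avoid division by zero
    return (255 * (a - np.min(a)) / rng).astype(np.uint8)

print(to_grayscale_255(np.array([-1.0, 0.0, 3.0])))  # [  0  63 255]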
Example 8
Project: Generative-Adversarial-Networks-Cookbook   Author: PacktPublishing   File: train.py   License: MIT License
def plot_checkpoint(self,e):
        filename = "/data/sample_"+str(e)+".png"

        noise = self.sample_latent_space(16)
        images = self.generator.Generator.predict(noise)
        
        plt.figure(figsize=(10,10))
        for i in range(images.shape[0]):
            plt.subplot(4, 4, i+1)
            if self.C==1:
                image = images[i, :, :]
                image = np.reshape(image, [self.H,self.W])
                image = (255*(image - np.min(image))/np.ptp(image)).astype(int)
                plt.imshow(image,cmap='gray')
            elif self.C==3:
                image = images[i, :, :, :]
                image = np.reshape(image, [self.H,self.W,self.C])
                image = (255*(image - np.min(image))/np.ptp(image)).astype(int)
                plt.imshow(image)
            
            plt.axis('off')
        plt.tight_layout()
        plt.savefig(filename)
        plt.close('all')
        return 
Example 9
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: histograms.py   License: MIT License
def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the data size. Used by many
    programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / np.sqrt(x.size) 
Example 10
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: histograms.py   License: MIT License
def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (np.log2(x.size) + 1.0) 
Example 11
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: histograms.py   License: MIT License
def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (2.0 * x.size ** (1.0 / 3)) 
Example 12
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: tritools.py   License: MIT License
def scale_factors(self):
        """
        Factors to rescale the triangulation into a unit square.

        Returns *k*, tuple of 2 scale factors.

        Returns
        -------
        k : tuple of 2 floats (kx, ky)
            Tuple of floats that would rescale the triangulation :
            ``[triangulation.x * kx, triangulation.y * ky]``
            fits exactly inside a unit square.

        """
        compressed_triangles = self._triangulation.get_masked_triangles()
        node_used = (np.bincount(np.ravel(compressed_triangles),
                                 minlength=self._triangulation.x.size) != 0)
        return (1 / np.ptp(self._triangulation.x[node_used]),
                1 / np.ptp(self._triangulation.y[node_used])) 
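scale_factors returns the reciprocal of the per-axis peak-to-peak range, so multiplying coordinates by (kx, ky) squeezes them into a unit square. A plain-array sketch of the same idea (the original only rescales; the shift to the origin is added here for illustration):

import numpy as np

pts = np.array([[2.0, 10.0],
                [6.0, 30.0],
                [4.0, 20.0]])
k = 1 / np.ptp(pts, axis=0)            # per-axis scale factors (kx, ky)
scaled = (pts - pts.min(axis=0)) * k   # shift to the origin and rescale
print(np.ptp(scaled, axis=0))          # [1. 1.] -> fits a unit square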
Example 13
Project: mne-features   Author: mne-tools   File: univariate.py   License: BSD 3-Clause "New" or "Revised" License
def compute_ptp_amp(data):
    """Peak-to-peak (PTP) amplitude of the data (per channel).

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)

    Returns
    -------
    output : ndarray, shape (n_channels,)

    Notes
    -----
    Alias of the feature function: **ptp_amp**
    """
    return np.ptp(data, axis=-1) 
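A quick sketch of the call pattern, assuming input shaped (n_channels, n_times) as the docstring specifies (the np.ptp line mirrors the function body):

import numpy as np

data = np.random.default_rng(0).normal(size=(4, 1000))  # 4 channels, 1000 samples
amplitudes = np.ptp(data, axis=-1)  # one peak-to-peak amplitude per channel
print(amplitudes.shape)             # (4,)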
Example 14
Project: GraphicDesignPatternByPython   Author: Relph1119   File: tritools.py   License: MIT License
def scale_factors(self):
        """
        Factors to rescale the triangulation into a unit square.

        Returns *k*, tuple of 2 scale factors.

        Returns
        -------
        k : tuple of 2 floats (kx, ky)
            Tuple of floats that would rescale the triangulation :
            ``[triangulation.x * kx, triangulation.y * ky]``
            fits exactly inside a unit square.

        """
        compressed_triangles = self._triangulation.get_masked_triangles()
        node_used = (np.bincount(np.ravel(compressed_triangles),
                                 minlength=self._triangulation.x.size) != 0)
        return (1 / np.ptp(self._triangulation.x[node_used]),
                1 / np.ptp(self._triangulation.y[node_used])) 
Example 15
Project: python3_ios   Author: holzschu   File: tritools.py   License: BSD 3-Clause "New" or "Revised" License
def scale_factors(self):
        """
        Factors to rescale the triangulation into a unit square.

        Returns *k*, tuple of 2 scale factors.

        Returns
        -------
        k : tuple of 2 floats (kx, ky)
            Tuple of floats that would rescale the triangulation :
            ``[triangulation.x * kx, triangulation.y * ky]``
            fits exactly inside a unit square.

        """
        compressed_triangles = self._triangulation.get_masked_triangles()
        node_used = (np.bincount(np.ravel(compressed_triangles),
                                 minlength=self._triangulation.x.size) != 0)
        return (1 / np.ptp(self._triangulation.x[node_used]),
                1 / np.ptp(self._triangulation.y[node_used])) 
Example 16
Project: autoreject   Author: autoreject   File: autoreject.py   License: BSD 3-Clause "New" or "Revised" License
def fit(self, X, y=None):
        """Fit it.

        Parameters
        ----------
        X : array, shape (n_epochs, n_times)
            The data for one channel.
        y : None
            Redundant. Necessary to be compatible with sklearn
            API.
        """
        deltas = np.ptp(X, axis=1)
        self.deltas_ = deltas
        keep = deltas <= self.thresh
        # XXX: actually go over all the folds before setting the min
        # in skopt. Otherwise, may confuse skopt.
        if self.thresh < np.min(np.ptp(X, axis=1)):
            assert np.sum(keep) == 0
            keep = deltas <= np.min(np.ptp(X, axis=1))
        self.mean_ = _slicemean(X, keep, axis=0)
        return self 
Example 17
Project: autoreject   Author: autoreject   File: autoreject.py   License: BSD 3-Clause "New" or "Revised" License
def _vote_bad_epochs(self, epochs, picks):
        """Each channel votes for an epoch as good or bad.

        Parameters
        ----------
        epochs : instance of mne.Epochs
            The epochs object for which bad epochs must be found.
        picks : array-like
            The indices of the channels to consider.
        """
        labels = np.zeros((len(epochs), len(epochs.ch_names)))
        labels.fill(np.nan)
        bad_sensor_counts = np.zeros((len(epochs),))

        this_ch_names = [epochs.ch_names[p] for p in picks]
        deltas = np.ptp(epochs.get_data()[:, picks], axis=-1).T
        threshes = [self.threshes_[ch_name] for ch_name in this_ch_names]
        for ch_idx, (delta, thresh) in enumerate(zip(deltas, threshes)):
            bad_epochs_idx = np.where(delta > thresh)[0]
            labels[:, picks[ch_idx]] = 0
            labels[bad_epochs_idx, picks[ch_idx]] = 1

        bad_sensor_counts = np.sum(labels == 1, axis=1)
        return labels, bad_sensor_counts 
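Both autoreject methods reduce each epoch to its peak-to-peak amplitude and compare it against a threshold. A minimal sketch of that idea on plain arrays, with a made-up threshold rather than autoreject's fitted per-channel ones:

import numpy as np

epochs = np.random.default_rng(0).normal(size=(20, 3, 500))  # (n_epochs, n_channels, n_times)
deltas = np.ptp(epochs, axis=-1)     # peak-to-peak amplitude per epoch and channel
thresh = 7.0                         # hypothetical rejection threshold
bad = (deltas > thresh).any(axis=1)  # an epoch is bad if any channel exceeds it
print('kept', int((~bad).sum()), 'of', len(epochs), 'epochs')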
Example 18
Project: ASR33   Author: hughpyle   File: prep_ascii.py   License: MIT License
def load_image(filename):
    if filename in loaded_files:
        return loaded_files[filename]
    img = imageio.imread(filename, as_gray=True).astype(float)  # np.float was removed in NumPy 1.24

    # Normalize the whole image
    # img *= 1.0/(img.max() - img.min())
    img = (img - np.min(img))/np.ptp(img)

    # Normalize on a sigmoid curve to better separate ink from paper
    k = 10
    img = np.sqrt(1 / (1 + np.exp(k * (img - 0.5))))

    loaded_files[filename] = img
    return img


# Pull out the image of a single character.
# Each character has multiple images, specify index (0-5) to choose one 
Example 19
Project: plotnine   Author: has2k1   File: stat_ydensity.py   License: GNU General Public License v2.0
def compute_group(cls, data, scales, **params):
        n = len(data)
        if n == 0:
            return pd.DataFrame()

        weight = data.get('weight')

        if params['trim']:
            range_y = data['y'].min(), data['y'].max()
        else:
            range_y = scales.y.dimension()

        dens = compute_density(data['y'], weight, range_y, **params)
        dens['y'] = dens['x']
        dens['x'] = np.mean([data['x'].min(), data['x'].max()])

        # Compute width if x has multiple values
        if len(np.unique(data['x'])) > 1:
            dens['width'] = np.ptp(data['x']) * 0.9

        return dens 
Example 20
Project: DABEST-python   Author: ACCLAB   File: old_test_plotting.py   License: BSD 3-Clause Clear License
def test_swarmspan():
    print('Testing swarmspans')

    base_mean = np.random.randint(10, 101)
    seed, ptp, df = create_dummy_dataset(base_mean=base_mean)

    print('\nSeed = {}; base mean = {}'.format(seed, base_mean))

    for c in df.columns[1:-1]:
        print('{}...'.format(c))

        f1, swarmplt = plt.subplots(1)
        sns.swarmplot(data=df[[df.columns[0], c]], ax=swarmplt)
        sns_yspans = []
        for coll in swarmplt.collections:
            sns_yspans.append(get_swarm_yspans(coll))

        f2, b = _api.plot(data=df, idx=(df.columns[0], c))
        dabest_yspans = []
        for coll in f2.axes[0].collections:
            dabest_yspans.append(get_swarm_yspans(coll))

        for j, span in enumerate(sns_yspans):
            assert span == pytest.approx(dabest_yspans[j]) 
Example 21
Project: ratcave   Author: ratcave   File: collision.py   License: MIT License
def _fit_to_parent_vertices(self, vertices):
        return np.ptp(vertices, axis=0) / 2 
Example 22
Project: ratcave   Author: ratcave   File: collision.py   License: MIT License
def _fit_to_parent_vertices(self, vertices):
        return np.ptp(vertices, axis=0) / 2 
Example 23
Project: ratcave   Author: ratcave   File: collision.py   License: MIT License
def _fit_to_parent_vertices(self, vertices, scale_gain=1e5):
        axes = [a for a in range(3) if a != self.ignore_axis]
        x, z = np.ptp(vertices[:, axes], axis=0) / 2
        return x, scale_gain, z  # scale_gain makes it clear in the display that one dimension is being ignored. 
Example 24
Project: recruit   Author: Frank-qlu   File: histograms.py   License: Apache License 2.0
def _hist_bin_stone(x, range):
    """
    Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).

    The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
    The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule

    This paper by Stone appears to be the origination of this rule.
    http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.
    range : (float, float)
        The lower and upper range of the bins.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """

    n = x.size
    ptp_x = np.ptp(x)
    if n <= 1 or ptp_x == 0:
        return 0

    def jhat(nbins):
        hh = ptp_x / nbins
        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
        return (2 - (n + 1) * p_k.dot(p_k)) / hh

    nbins_upper_bound = max(100, int(np.sqrt(n)))
    # `_range` is the builtin range, aliased at module level in numpy's
    # histograms.py because this function's `range` parameter shadows it.
    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
    if nbins == nbins_upper_bound:
        warnings.warn("The number of bins estimated may be suboptimal.", RuntimeWarning, stacklevel=2)
    return ptp_x / nbins 
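In everyday use these estimators are reached through the bins= string argument of np.histogram and np.histogram_bin_edges rather than called directly:

import numpy as np

x = np.random.default_rng(0).normal(size=500)
for method in ('sqrt', 'sturges', 'rice', 'doane', 'stone'):
    edges = np.histogram_bin_edges(x, bins=method)
    print(method, '->', len(edges) - 1, 'bins')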
Example 25
Project: recruit   Author: Frank-qlu   File: histograms.py   License: Apache License 2.0
def _hist_bin_doane(x, range):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return x.ptp() / (1.0 + np.log2(x.size) +
                                    np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0 
Example 26
Project: auto-alt-text-lambda-api   Author: abhisuri97   File: test_numeric.py   License: MIT License
def test_ptp(self):
        a = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(np.ptp(a, axis=0), 15.0) 
Example 27
Project: vnpy_crypto   Author: birforce   File: fromnumeric.py   License: MIT License
def ptp(a, axis=None, out=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for 'peak to peak'.

    Parameters
    ----------
    a : array_like
        Input values.
    axis : int, optional
        Axis along which to find the peaks.  By default, flatten the
        array.
    out : array_like
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type of the output values will be cast if necessary.

    Returns
    -------
    ptp : ndarray
        A new array holding the result, unless `out` was
        specified, in which case a reference to `out` is returned.

    Examples
    --------
    >>> x = np.arange(4).reshape((2,2))
    >>> x
    array([[0, 1],
           [2, 3]])

    >>> np.ptp(x, axis=0)
    array([2, 2])

    >>> np.ptp(x, axis=1)
    array([1, 1])

    """
    return _wrapfunc(a, 'ptp', axis=axis, out=out)
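One caveat, also noted in the NumPy documentation for this function: on signed integer arrays the subtraction can wrap around, so the result may come out negative. Casting to a wider type avoids it.

import numpy as np

y = np.array([126, 127, -128, -127], dtype=np.int8)
print(np.ptp(y))                   # -1, because 127 - (-128) overflows int8
print(np.ptp(y.astype(np.int16)))  # 255, the expected range after widening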