Python numpy.ptp() Examples

The following are 30 code examples of numpy.ptp(). Follow the links above each example to go to the original project or source file. You may also want to check out all available functions/classes of the module numpy, or try the search function.
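numpy.ptp() ("peak to peak") returns the range of values, maximum minus minimum, over a whole array or along a given axis. Before the project examples, here is a minimal self-contained demonstration (with made-up values):

import numpy as np

a = np.array([[4, 9, 2, 10],
              [6, 9, 7, 12]])
print(np.ptp(a))          # 10: global max (12) minus global min (2)
print(np.ptp(a, axis=0))  # [2 0 5 2]: per-column range
print(np.ptp(a, axis=1))  # [8 6]: per-row range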
Example #1
Source File: old_test_plotting.py    From DABEST-python with BSD 3-Clause Clear License
def test_swarmspan():
    print('Testing swarmspans')

    base_mean = np.random.randint(10, 101)
    seed, ptp, df = create_dummy_dataset(base_mean=base_mean)

    print('\nSeed = {}; base mean = {}'.format(seed, base_mean))

    for c in df.columns[1:-1]:
        print('{}...'.format(c))

        f1, swarmplt = plt.subplots(1)
        sns.swarmplot(data=df[[df.columns[0], c]], ax=swarmplt)
        sns_yspans = []
        for coll in swarmplt.collections:
            sns_yspans.append(get_swarm_yspans(coll))

        f2, b = _api.plot(data=df, idx=(df.columns[0], c))
        dabest_yspans = []
        for coll in f2.axes[0].collections:
            dabest_yspans.append(get_swarm_yspans(coll))

        for j, span in enumerate(sns_yspans):
            assert span == pytest.approx(dabest_yspans[j]) 
Example #2
Source File: test_statistics_execute.py    From mars with Apache License 2.0
def testPtpExecution(self):
        x = arange(4, chunk_size=1).reshape(2, 2)

        t = ptp(x, axis=0)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.ptp(np.arange(4).reshape(2, 2), axis=0)
        np.testing.assert_equal(res, expected)

        t = ptp(x, axis=1)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.ptp(np.arange(4).reshape(2, 2), axis=1)
        np.testing.assert_equal(res, expected)

        t = ptp(x)

        res = self.executor.execute_tensor(t)[0]
        expected = np.ptp(np.arange(4).reshape(2, 2))
        np.testing.assert_equal(res, expected) 
Example #3
Source File: histograms.py    From recruit with Apache License 2.0
def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the square root of the data
    size. Used by many programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / np.sqrt(x.size) 
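These private _hist_bin_* helpers back the string options of NumPy's public histogram API. Below is a rough sketch (with made-up data) of how the square-root rule above is reached in practice; the 'sturges' and 'rice' estimators in the following examples are selected the same way:

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=1000)
# bins='sqrt' selects the estimator above; the resulting bin width is
# approximately data.ptp() / sqrt(data.size), up to rounding of the bin count.
edges = np.histogram_bin_edges(data, bins='sqrt')
print(len(edges) - 1, edges[1] - edges[0])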
Example #4
Source File: histograms.py    From recruit with Apache License 2.0
def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (np.log2(x.size) + 1.0) 
Example #5
Source File: histograms.py    From recruit with Apache License 2.0
def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (2.0 * x.size ** (1.0 / 3)) 
Example #6
Source File: collision.py    From ratcave with MIT License
def __init__(self, parent=None, visible=True, drawmode=gl.GL_LINES, position=(0, 0, 0), **kwargs):
        """Calculates collision by checking if a point is inside a sphere around the mesh vertices."""
        # kwargs['scale'] = np.ptp(parent.vertices, axis=0).max() / 2 if 'scale' not in kwargs else kwargs['scale']
        # kwargs['']

        from .wavefront import WavefrontReader
        from .resources import obj_primitives
        reader = WavefrontReader(obj_primitives)
        body = reader.bodies[self.primitive_shape]
        vertices, normals, texcoords = body['v'], body['vn'], body['vt']

        super(ColliderBase, self).__init__(arrays=[vertices, normals, texcoords],
                                           drawmode=drawmode, visible=visible, position=position,
                                           **kwargs)
        self.uniforms['diffuse'] = 1., 0, 0

        # Changes Scenegraph.parent execution order so self.scale can occur in the CollisionChecker parent property.
        if parent:
            self.parent = parent 
Example #7
Source File: visualizer.py    From kits19.MIScnn with GNU General Public License v3.0
def overlay_segmentation(vol, seg):
    # Scale volume to greyscale range
    vol_greyscale = (255*(vol - np.min(vol))/np.ptp(vol)).astype(int)
    # Convert volume to RGB
    vol_rgb = np.stack([vol_greyscale, vol_greyscale, vol_greyscale], axis=-1)
    # Initialize segmentation in RGB
    shp = seg.shape
    seg_rgb = np.zeros((shp[0], shp[1], shp[2], 3), dtype=int)  # np.int was removed in NumPy 1.24
    # Set class to appropriate color
    seg_rgb[np.equal(seg, 1)] = [255, 0,   0]
    seg_rgb[np.equal(seg, 2)] = [0,   0, 255]
    # Get binary array for places where an ROI lives
    segbin = np.greater(seg, 0)
    repeated_segbin = np.stack((segbin, segbin, segbin), axis=-1)
    # Weighted sum where there's a value to overlay
    alpha = 0.3
    vol_overlayed = np.where(
        repeated_segbin,
        np.round(alpha*seg_rgb+(1-alpha)*vol_rgb).astype(np.uint8),
        np.round(vol_rgb).astype(np.uint8)
    )
    # Return final volume with segmentation overlay
    return vol_overlayed 
Example #8
Source File: train.py    From Generative-Adversarial-Networks-Cookbook with MIT License
def plot_checkpoint(self,e):
        filename = "/data/sample_"+str(e)+".png"

        noise = self.sample_latent_space(16)
        images = self.generator.Generator.predict(noise)
        
        plt.figure(figsize=(10,10))
        for i in range(images.shape[0]):
            plt.subplot(4, 4, i+1)
            if self.C==1:
                image = images[i, :, :]
                image = np.reshape(image, [self.H,self.W])
                image = (255*(image - np.min(image))/np.ptp(image)).astype(int)
                plt.imshow(image,cmap='gray')
            elif self.C==3:
                image = images[i, :, :, :]
                image = np.reshape(image, [self.H,self.W,self.C])
                image = (255*(image - np.min(image))/np.ptp(image)).astype(int)
                plt.imshow(image)
            
            plt.axis('off')
        plt.tight_layout()
        plt.savefig(filename)
        plt.close('all')
        return 
Example #9
Source File: histograms.py    From Mastering-Elasticsearch-7.0 with MIT License
def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the square root of the data
    size. Used by many programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / np.sqrt(x.size) 
Example #10
Source File: histograms.py    From Mastering-Elasticsearch-7.0 with MIT License
def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (np.log2(x.size) + 1.0) 
Example #11
Source File: histograms.py    From Mastering-Elasticsearch-7.0 with MIT License
def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (2.0 * x.size ** (1.0 / 3)) 
Example #12
Source File: tritools.py    From Mastering-Elasticsearch-7.0 with MIT License
def scale_factors(self):
        """
        Factors to rescale the triangulation into a unit square.

        Returns *k*, tuple of 2 scale factors.

        Returns
        -------
        k : tuple of 2 floats (kx, ky)
            Tuple of floats that would rescale the triangulation so that
            ``[triangulation.x * kx, triangulation.y * ky]``
            fits exactly inside a unit square.

        """
        compressed_triangles = self._triangulation.get_masked_triangles()
        node_used = (np.bincount(np.ravel(compressed_triangles),
                                 minlength=self._triangulation.x.size) != 0)
        return (1 / np.ptp(self._triangulation.x[node_used]),
                1 / np.ptp(self._triangulation.y[node_used])) 
Example #13
Source File: tritools.py    From GraphicDesignPatternByPython with MIT License
def scale_factors(self):
        """
        Factors to rescale the triangulation into a unit square.

        Returns *k*, tuple of 2 scale factors.

        Returns
        -------
        k : tuple of 2 floats (kx, ky)
            Tuple of floats that would rescale the triangulation so that
            ``[triangulation.x * kx, triangulation.y * ky]``
            fits exactly inside a unit square.

        """
        compressed_triangles = self._triangulation.get_masked_triangles()
        node_used = (np.bincount(np.ravel(compressed_triangles),
                                 minlength=self._triangulation.x.size) != 0)
        return (1 / np.ptp(self._triangulation.x[node_used]),
                1 / np.ptp(self._triangulation.y[node_used])) 
Example #14
Source File: univariate.py    From mne-features with BSD 3-Clause "New" or "Revised" License
def compute_ptp_amp(data):
    """Peak-to-peak (PTP) amplitude of the data (per channel).

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)

    Returns
    -------
    output : ndarray, shape (n_channels,)

    Notes
    -----
    Alias of the feature function: **ptp_amp**
    """
    return np.ptp(data, axis=-1) 
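A brief usage sketch, with a synthetic array standing in for real channel data (same shape convention as the docstring above):

import numpy as np

data = np.array([[0.0, 1.0, -2.0],
                 [5.0, 5.0,  5.0]])  # (n_channels=2, n_times=3)
print(np.ptp(data, axis=-1))  # [3. 0.]: peak-to-peak amplitude per channel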
Example #15
Source File: stat_ydensity.py    From plotnine with GNU General Public License v2.0
def compute_group(cls, data, scales, **params):
        n = len(data)
        if n == 0:
            return pd.DataFrame()

        weight = data.get('weight')

        if params['trim']:
            range_y = data['y'].min(), data['y'].max()
        else:
            range_y = scales.y.dimension()

        dens = compute_density(data['y'], weight, range_y, **params)
        dens['y'] = dens['x']
        dens['x'] = np.mean([data['x'].min(), data['x'].max()])

        # Compute width if x has multiple values
        if len(np.unique(data['x'])) > 1:
            dens['width'] = np.ptp(data['x']) * 0.9

        return dens 
Example #16
Source File: prep_ascii.py    From ASR33 with MIT License
def load_image(filename):
    if filename in loaded_files:
        return loaded_files[filename]
    img = imageio.imread(filename, as_gray=True).astype(float)  # np.float was removed in NumPy 1.24

    # Normalize the whole image
    # img *= 1.0/(img.max() - img.min())
    img = (img - np.min(img))/np.ptp(img)

    # Normalize on a sigmoid curve to better separate ink from paper
    k = 10
    img = np.sqrt(1 / (1 + np.exp(k * (img - 0.5))))

    loaded_files[filename] = img
    return img


Example #17
Source File: autoreject.py    From autoreject with BSD 3-Clause "New" or "Revised" License
def _vote_bad_epochs(self, epochs, picks):
        """Each channel votes for an epoch as good or bad.

        Parameters
        ----------
        epochs : instance of mne.Epochs
            The epochs object for which bad epochs must be found.
        picks : array-like
            The indices of the channels to consider.
        """
        labels = np.zeros((len(epochs), len(epochs.ch_names)))
        labels.fill(np.nan)
        bad_sensor_counts = np.zeros((len(epochs),))

        this_ch_names = [epochs.ch_names[p] for p in picks]
        deltas = np.ptp(epochs.get_data()[:, picks], axis=-1).T
        threshes = [self.threshes_[ch_name] for ch_name in this_ch_names]
        for ch_idx, (delta, thresh) in enumerate(zip(deltas, threshes)):
            bad_epochs_idx = np.where(delta > thresh)[0]
            labels[:, picks[ch_idx]] = 0
            labels[bad_epochs_idx, picks[ch_idx]] = 1

        bad_sensor_counts = np.sum(labels == 1, axis=1)
        return labels, bad_sensor_counts 
Example #18
Source File: autoreject.py    From autoreject with BSD 3-Clause "New" or "Revised" License
def fit(self, X, y=None):
        """Fit it.

        Parameters
        ----------
        X : array, shape (n_epochs, n_times)
            The data for one channel.
        y : None
            Redundant. Necessary to be compatible with sklearn
            API.
        """
        deltas = np.ptp(X, axis=1)
        self.deltas_ = deltas
        keep = deltas <= self.thresh
        # XXX: actually go over all the folds before setting the min
        # in skopt. Otherwise, may confuse skopt.
        if self.thresh < np.min(np.ptp(X, axis=1)):
            assert np.sum(keep) == 0
            keep = deltas <= np.min(np.ptp(X, axis=1))
        self.mean_ = _slicemean(X, keep, axis=0)
        return self 
Example #19
Source File: histograms.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (2.0 * x.size ** (1.0 / 3)) 
Example #20
Source File: histograms.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / (np.log2(x.size) + 1.0) 
Example #21
Source File: histograms.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the square root of the data
    size. Used by many programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return x.ptp() / np.sqrt(x.size) 
Example #22
Source File: tritools.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def scale_factors(self):
        """
        Factors to rescale the triangulation into a unit square.

        Returns *k*, tuple of 2 scale factors.

        Returns
        -------
        k : tuple of 2 floats (kx, ky)
            Tuple of floats that would rescale the triangulation so that
            ``[triangulation.x * kx, triangulation.y * ky]``
            fits exactly inside a unit square.

        """
        compressed_triangles = self._triangulation.get_masked_triangles()
        node_used = (np.bincount(np.ravel(compressed_triangles),
                                 minlength=self._triangulation.x.size) != 0)
        return (1 / np.ptp(self._triangulation.x[node_used]),
                1 / np.ptp(self._triangulation.y[node_used])) 
Example #23
Source File: lucidDream.py    From pyLucid with MIT License
def spline_transform_multi(img, mask):
    bimask=mask>0
    M,N=np.where(bimask)
    w=np.ptp(N)+1
    h=np.ptp(M)+1
    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    bound=cv2.dilate(bimask.astype('uint8'),kernel)-bimask
    y,x=np.where(bound>0)

    if x.size>4:
        newxy=thin_plate_transform(x,y,w,h,mask.shape[:2],num_points=5)

        new_img=cv2.remap(img,newxy,None,cv2.INTER_LINEAR)
        new_msk=cv2.remap(mask,newxy,None,cv2.INTER_NEAREST)
    else:
        # Fewer than five boundary points: fall back to the unchanged
        # inputs (this also avoids unbound new_img/new_msk when x is empty).
        new_img=img
        new_msk=mask
    return new_img,new_msk 
Example #24
Source File: histograms.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def _hist_bin_doane(x, range):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return x.ptp() / (1.0 + np.log2(x.size) +
                                    np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0 
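Like the simpler estimators above, Doane's rule is reachable through the public API as bins='doane'. A short sketch with deliberately skewed, made-up data, where the skewness correction typically produces more bins than Sturges:

import numpy as np

rng = np.random.default_rng(1)
skewed = rng.exponential(size=500)  # right-skewed, non-normal sample
print(len(np.histogram_bin_edges(skewed, bins='doane')) - 1)
print(len(np.histogram_bin_edges(skewed, bins='sturges')) - 1)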
Example #25
Source File: collision.py    From ratcave with MIT License
def _fit_to_parent_vertices(self, vertices, scale_gain=1e5):
        axes = [a for a in range(3) if a != self.ignore_axis]
        x, z = np.ptp(vertices[:, axes], axis=0) / 2
        return x, scale_gain, z  # scale_gain makes it clear in the display that one dimension is being ignored. 
Example #26
Source File: utils.py    From BigGAN-TPU-TensorFlow with MIT License
def imwrite(file, data):

    # Normalise to [0, 255] and cast to 8-bit unsigned integers
    d = 255 * (data - np.min(data)) / np.ptp(data)
    d = d.astype(np.uint8)

    imageio.imwrite(file, d, format="png") 
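One caveat with this min-max pattern: np.ptp(data) is zero for a constant image, so the division yields NaNs (with a runtime warning). A hypothetical guarded variant (safe_normalize is not part of the project above):

import numpy as np

def safe_normalize(data):
    span = np.ptp(data)
    if span == 0:
        # Constant input: return a black image rather than dividing by zero.
        return np.zeros_like(data, dtype=np.uint8)
    return (255 * (data - np.min(data)) / span).astype(np.uint8)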
Example #27
Source File: autoreject.py    From autoreject with BSD 3-Clause "New" or "Revised" License
def fit(self, X, y=None):
        """Fit it."""
        if self.n_channels is None or self.n_times is None:
            raise ValueError('Cannot fit without knowing n_channels'
                             ' and n_times')
        X = X.reshape(-1, self.n_channels, self.n_times)
        deltas = np.array([np.ptp(d, axis=1) for d in X])
        epoch_deltas = deltas.max(axis=1)
        keep = epoch_deltas <= self.thresh
        self.mean_ = _slicemean(X, keep, axis=0)
        return self 
Example #28
Source File: test_numeric.py    From pySINDy with MIT License
def test_ptp(self):
        a = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(np.ptp(a, axis=0), 15.0) 
Example #29
Source File: test_matterport.py    From LayoutNetv2 with MIT License
def get_ini_cor(cor_img, d1=21, d2=3):
    cor = convolve(cor_img, np.ones((d1, d1)), mode='constant', cval=0.0)
    cor_id = []
    cor_ = cor_img.sum(0)
    cor_ = (cor_-np.amin(cor_))/np.ptp(cor_)
    
    min_v = 0.25  # 0.05
    xs_ = find_N_peaks(cor_, r=26, min_v=min_v, N=None)[0]
    
    # special case for too few corners
    if xs_.shape[0] < 4:
        xs_ = find_N_peaks(cor_, r=26, min_v=0.05, N=None)[0]
        if xs_.shape[0] < 4:
            xs_ = find_N_peaks(cor_, r=26, min_v=0, N=None)[0]

    X_loc = xs_
    for x in X_loc:
        x_ = int(np.round(x))

        V_signal = cor[:, max(0, x_-d2):x_+d2+1].sum(1)
        y1, y2 = find_N_peaks_conv(V_signal, prominence=None,
                                   distance=20, N=2)[0]
        cor_id.append((x, y1))
        cor_id.append((x, y2))

    cor_id = np.array(cor_id, np.float64)

    return cor_id 
Example #30
Source File: contour.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def print_label(self, linecontour, labelwidth):
        "Return *False* if contours are too short for a label."
        return (len(linecontour) > 10 * labelwidth
                or (np.ptp(linecontour, axis=0) > 1.2 * labelwidth).any())
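For an (N, 2) array of contour points such as linecontour, np.ptp(..., axis=0) gives the width and height of the bounding box, which is what the check above compares against the label width. A tiny illustration with made-up points:

import numpy as np

line = np.array([[0.0, 0.0], [3.0, 1.0], [5.0, 4.0]])
print(np.ptp(line, axis=0))  # [5. 4.]: bounding-box width and height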