Python numpy.trunc() Examples

The following are 30 code examples of numpy.trunc(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
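As a quick orientation before the project examples: numpy.trunc() rounds each element toward zero, i.e. it simply drops the fractional part while keeping a floating dtype. A minimal sketch (not taken from any of the projects below):

import numpy as np

a = np.array([-1.7, -0.2, 0.2, 1.5])
print(np.trunc(a))        # [-1. -0.  0.  1.]  -- fractional parts discarded
print(np.trunc(a).dtype)  # float64, the floating dtype is preserved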
Example #1
Source File: utils.py    From VIP with MIT License
def idl_round(x):
    """
    Round to the *nearest* integer, half-away-from-zero.

    Parameters
    ----------
    x : array-like
        Number or array to be rounded

    Returns
    -------
    r_rounded : array-like
        note that the returned values are floats

    Notes
    -----
    IDL ``ROUND`` rounds to the *nearest* integer (commercial rounding),
    unlike numpy's round/rint, which round to the nearest *even*
value (half-to-even, banker's rounding) as defined in the IEEE 754
standard.

    """
    return np.trunc(x + np.copysign(0.5, x)) 
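A short usage sketch (not part of VIP) contrasting idl_round with numpy's half-to-even rounding on exact .5 values:

import numpy as np

x = np.array([-2.5, -1.5, 1.5, 2.5])
print(np.round(x))    # [-2. -2.  2.  2.]  half-to-even
print(idl_round(x))   # [-3. -2.  2.  3.]  half-away-from-zero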
Example #2
Source File: image_ops_test.py    From deep_image_model with Apache License 2.0
def test_adjust_gamma_less_one(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to half"""
    with self.test_session():
      x_np = np.arange(0, 255, 4, np.uint8).reshape(8,8)
      y = image_ops.adjust_gamma(x_np, gamma=0.5)
      y_tf = np.trunc(y.eval())

      y_np = np.array([[  0,  31,  45,  55,  63,  71,  78,  84],
          [ 90,  95, 100, 105, 110, 115, 119, 123],
          [127, 131, 135, 139, 142, 146, 149, 153],
          [156, 159, 162, 165, 168, 171, 174, 177],
          [180, 183, 186, 188, 191, 194, 196, 199],
          [201, 204, 206, 209, 211, 214, 216, 218],
          [221, 223, 225, 228, 230, 232, 234, 236],
          [238, 241, 243, 245, 247, 249, 251, 253]], dtype=np.float32)

      self.assertAllClose(y_tf, y_np, 1e-6) 
Example #3
Source File: image_ops_test.py    From deep_image_model with Apache License 2.0
def test_adjust_gamma_greater_one(self):
    """Verifying the output with expected results for gamma
    correction with gamma equal to two"""
    with self.test_session():
      x_np = np.arange(0, 255, 4, np.uint8).reshape(8,8)
      y = image_ops.adjust_gamma(x_np, gamma=2)
      y_tf = np.trunc(y.eval())

      y_np = np.array([[  0,   0,   0,   0,   1,   1,   2,   3],
          [  4,   5,   6,   7,   9,  10,  12,  14],
          [ 16,  18,  20,  22,  25,  27,  30,  33],
          [ 36,  39,  42,  45,  49,  52,  56,  60],
          [ 64,  68,  72,  76,  81,  85,  90,  95],
          [100, 105, 110, 116, 121, 127, 132, 138],
          [144, 150, 156, 163, 169, 176, 182, 189],
          [196, 203, 211, 218, 225, 233, 241, 249]], dtype=np.float32)

      self.assertAllClose(y_tf, y_np, 1e-6) 
Example #4
Source File: RQGA.py    From QuantumGeneticAlgorithms with MIT License
def RQGA(n, string_num):
    psi_=psi(string_num)
    H=hadamard(n)
    psi_=np.dot(H,psi_)
    print(psi_)
    print()
    iter=np.trunc(maxiter(n))
    iter=int(round(iter))
    for i in range (1,iter):
        U_O=U_Oracle(n)
        print(U_O)
        print()
        psi_=np.dot(U_O,psi_)
        print(psi_)
        print()
        D=ia(n)
        psi_=np.dot(D,psi_)
    print(psi_)

#########################################################
#                                                       #
# MAIN PROGRAM                                          #
#                                                       #
######################################################### 
Example #5
Source File: virtual_pwsMagnet.py    From qkit with GNU General Public License v2.0
def check_for_quench(self, wait=5, threshold=0.95, repetitions=1000):
        '''
        Checks the magnet for a quench

        Input:
            wait (float) : waiting time in sec, default=5
            threshold (float) : maximum voltage in volts, default=0.95
            repetitions (int) : number of times quenching is checked, default=1000

        Output:
            None
        '''
        logging.debug(__name__ + 'check_for_quench()')
        for i in range(repetitions):
            time.sleep(wait)
            voltage = self.do_get_voltage()
            current = self.do_get_current()
            print("V=" + str(np.trunc(voltage * 1000) / 1000.) + "V, I=" + str(
                np.trunc(current * 1000) / 1000.) + "A, R=" + str(int(np.trunc(voltage / current * 1000))) + "mOhm")
            if voltage > threshold:
                print("WARNING! Magnet quench!! ramping down the coil...")
                self.ramp_current(0., 2e-3, wait=0.2, showvalue=True)
                return 
Example #6
Source File: hope_numpy.py    From hope with GNU General Public License v3.0
def numpy_math(x):
    # **Trigonometric functions**
    a = np.sin(x)     #   Trigonometric sine, element-wise.
    a = np.cos(x)     #   Cosine elementwise.
    a = np.tan(x)     #   Compute tangent element-wise.
    a = np.arcsin(x)     #Inverse sine, element-wise.
    a = np.arccos(x)     #Trigonometric inverse cosine, element-wise.
    a = np.arctan(x)     #Trigonometric inverse tangent, element-wise.
    
    # **Hyperbolic functions**
    a = np.sinh(x)     #  Hyperbolic sine, element-wise.
    a = np.cosh(x)     #  Hyperbolic cosine, element-wise.
    a = np.tanh(x)     #  Compute hyperbolic tangent element-wise.
    
    
    # **Miscellaneous**
    a = np.exp(x)     #   Calculate the exponential of all elements in the input array.
    a = np.sum(x)     #          Return the sum of array elements.
    a = np.sqrt(x)     #         Return the positive square-root of an array, element-wise.
    a = np.ceil(x)     #         Return the ceiling of the input, element-wise.
    a = np.floor(x)     #        Return the floor of the input, element-wise.
    a = np.trunc(x)     #        Return the truncated value of the input, element-wise.
    a = np.fabs(x)     #            Compute the absolute values element-wise
    a = np.pi     #              The constant pi
Example #7
Source File: test_op_level3.py    From incubator-tvm with Apache License 2.0
def test_unary_identity():
    for op, ref in [(relay.zeros_like, np.zeros_like),
               (relay.ones_like, np.ones_like),
               (relay.ceil, np.ceil),
               (relay.floor, np.floor),
               (relay.trunc, np.trunc),
               (relay.round, np.round),
               (relay.abs, np.abs),
               (relay.copy, None), # np.copy
               (relay.negative, np.negative),
               (relay.sign, np.sign)]:
        shape = (8, 9, 4)
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = op(x)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType(shape, "float32")

        if ref is not None:
            data = np.random.rand(*shape).astype('float32')
            intrp = create_executor()
            op_res = intrp.evaluate(y, { x: relay.const(data) })
            ref_res = ref(data)
            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) 
Example #8
Source File: mini_batch_handler.py    From recnet with MIT License
def check_out_data_set(self):

        for set in ['train', 'valid', 'test']:
            if self.prm.data[set + "_data_name"] != None:
                file_name = self.prm.data["data_location"] + self.prm.data[set + "_data_name"]
                try:
                    d = klepto.archives.file_archive(file_name, cached=True,serialized=True)
                    d.load()
                    data_set_x = d['x']
                    data_set_y = d['y']
                    d.clear()
                    self.prm.data[set + "_set_len"] = data_set_x.__len__()
                    if data_set_x.__len__() != data_set_y.__len__():
                        raise Warning("x and y " + set + "_data_name do not have the same length")
                    self.prm.data["x_size"] = data_set_x[0].shape[1]
                    if self.prm.data["x_size"] != int(self.prm.struct["net_size"][0]):
                        raise Warning(set + " data x size and net input size are unequal")
                    if self.prm.optimize['CTC'] == False:
                        self.prm.data["y_size"] = data_set_y[0].shape[1]
                        if self.prm.data["y_size"] != int(self.prm.struct["net_size"][-1]):
                            raise Warning(set + " data y size and net output size are unequal")
                    else:
                        self.prm.data["y_size"] = self.prm.struct["net_size"][-1]
                    del data_set_x
                    del data_set_y
                    self.prm.data[set + "_batch_quantity"] = int(np.trunc(self.prm.data[set + "_set_len" ]/self.prm.data["batch_size"]))
                    self.prm.data["checked_data"][set] = True
                except KeyError:
                    raise Warning("data_location or " + set + "_data_name wrong")





    ###### Create mini batches and storage them in klepto files
    ######################################## 
Example #9
Source File: cross_val.py    From Splunking-Crime with GNU Affero General Public License v3.0
def __init__(self, n, k):
        """
        K-Folds cross validation iterator:
        Provides train/test indexes to split data in train test sets

        Parameters
        ----------
        n: int
            Total number of elements
        k: int
            number of folds

        Examples
        --------
        >>> from scikits.learn import cross_val
        >>> X = [[1, 2], [3, 4], [1, 2], [3, 4]]
        >>> y = [1, 2, 3, 4]
        >>> kf = cross_val.KFold(4, k=2)
        >>> for train_index, test_index in kf:
        ...    print "TRAIN:", train_index, "TEST:", test_index
        ...    X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
        TRAIN: [False False  True  True] TEST: [ True  True False False]
        TRAIN: [ True  True False False] TEST: [False False  True  True]

        Notes
        -----
        All the folds have size trunc(n/k), the last one has the complementary
        """
        assert k>0, ValueError('cannot have k below 1')
        assert k<n, ValueError('cannot have k=%d greater than %d'% (k, n))
        self.n = n
        self.k = k 
Example #10
Source File: test_functions.py    From hope with GNU General Public License v3.0
def test_func_trunc(a, b, c): return np.trunc(a) 
Example #11
Source File: formats.py    From Carnets with BSD 3-Clause "New" or "Revised" License
def set_jds(self, val1, val2):
        self._check_scale(self._scale)  # Validate scale.

        sum12, err12 = two_sum(val1, val2)
        iy_start = np.trunc(sum12).astype(int)
        extra, y_frac = two_sum(sum12, -iy_start)
        y_frac += extra + err12

        val = (val1 + val2).astype(np.double)
        iy_start = np.trunc(val).astype(int)

        imon = np.ones_like(iy_start)
        iday = np.ones_like(iy_start)
        ihr = np.zeros_like(iy_start)
        imin = np.zeros_like(iy_start)
        isec = np.zeros_like(y_frac)

        # Possible enhancement: use np.unique to only compute start, stop
        # for unique values of iy_start.
        scale = self.scale.upper().encode('ascii')
        jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
                                          ihr, imin, isec)
        jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
                                      ihr, imin, isec)

        t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
        t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
        t_frac = t_start + (t_end - t_start) * y_frac

        self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2) 
Example #12
Source File: anomaly_detect_ts.py    From AnomalyDetection with Apache License 2.0
def _get_max_outliers(data, max_percent_anomalies):
    """
    Calculates the max_outliers for an input data set

      data : pandas DataFrame
        the input data set
      max_percent_anomalies : float
        the maximum fraction of the data set values that may be flagged as anomalies
    """
    max_outliers = int(np.trunc(data.size * max_percent_anomalies))
    assert max_outliers, 'With longterm=True, AnomalyDetection splits the data into 2 week periods by default. You have {0} observations in a period, which is too few. Set a higher piecewise_median_period_weeks.'.format(
        data.size)
    return max_outliers 
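A usage sketch, assuming `data` is a pandas Series (anything with a `.size` attribute behaves the same way):

import numpy as np
import pandas as pd

data = pd.Series(np.random.randn(1000))
print(_get_max_outliers(data, 0.02))  # int(np.trunc(1000 * 0.02)) == 20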
Example #13
Source File: test_operations.py    From myia with MIT License
def test_elemwise_trunc(a):
    return np.trunc(a) 
Example #14
Source File: test_operations.py    From myia with MIT License
def test_trunc(a):
    return math.trunc(a) 
Example #15
Source File: circlefit.py    From qkit with GNU General Public License v2.0
def _periodic_boundary(self,x,bound):
        return np.fmod(x,bound)-np.trunc(x/bound)*bound 
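Read on its own, the expression folds any x with |x| < 2*bound back into the open interval (-bound, bound). A quick numeric check (a standalone sketch, independent of the circlefit class):

import numpy as np

def periodic_boundary(x, bound):
    return np.fmod(x, bound) - np.trunc(x / bound) * bound

print(periodic_boundary(4.0, np.pi))    # about -2.2832, i.e. 4 - 2*pi
print(periodic_boundary(-4.0, np.pi))   # about  2.2832
print(periodic_boundary(1.0, np.pi))    # 1.0, already inside the interval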
Example #16
Source File: woebin.py    From scorecardpy with MIT License
def pretty(low, high, n):
    '''
    pretty breakpoints, the same as pretty function in R
    
    Params
    ------
    low: minimal value 
    high: maximal value 
    n: number of intervals
    
    Returns
    ------
    numpy.ndarray
        returns a breakpoints array
    '''
    # nicenumber
    def nicenumber(x):
        exp = np.trunc(np.log10(abs(x)))
        f   = abs(x) / 10**exp
        if f < 1.5:
            nf = 1.
        elif f < 3.:
            nf = 2.
        elif f < 7.:
            nf = 5.
        else:
            nf = 10.
        return np.sign(x) * nf * 10.**exp
    # pretty breakpoints
    d     = abs(nicenumber((high-low)/(n-1)))
    miny  = np.floor(low  / d) * d
    maxy  = np.ceil (high / d) * d
    return np.arange(miny, maxy+0.5*d, d)
# required in woebin2 # return initial binning 
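A usage sketch (not part of scorecardpy's public API) illustrating the R-like behaviour, where the step is "nicened" to 1, 2, 5 or 10 times a power of ten:

import numpy as np

print(pretty(0, 10, 5))      # [ 0.  2.  4.  6.  8. 10.]  -- raw step 2.5 nicened to 2
print(pretty(0.3, 8.7, 5))   # [ 0.  2.  4.  6.  8. 10.]  -- endpoints expanded to multiples of the step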
Example #17
Source File: circlefit.py    From resonator_tools with GNU General Public License v2.0
def _periodic_boundary(self,x,bound):
        return np.fmod(x,bound)-np.trunc(x/bound)*bound 
Example #18
Source File: loda.py    From ad_examples with MIT License
def pdf_hist_equal_bins(x, h, minpdf=1e-8):
    # here we are assuming a regular histogram where
    # h.breaks[1] - h.breaks[0] would return the width of the bin
    p = (x - h.breaks[0]) / (h.breaks[1] - h.breaks[0])
    ndensity = len(h.density)
    p = np.array([min(int(np.trunc(v)), ndensity-1) for v in p])
    d = h.density[p]
    # quick hack to make sure d is never 0
    d = np.array([max(v, minpdf) for v in d])
    return d 
Example #19
Source File: loda.py    From ad_examples with MIT License
def get_bin_for_equal_hist(breaks, x):
    if x < breaks[0]:
        return 0
    if x > breaks[len(breaks)-1]:
        return len(breaks)-1
    i = np.trunc((x - breaks[0]) / (breaks[1] - breaks[0]))  # get integral value
    return int(i) 
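A small sketch of how the bin index comes out for an equal-width histogram (the breaks array below is made up for illustration):

import numpy as np

breaks = np.array([0.0, 1.0, 2.0, 3.0, 4.0])  # equal-width bin edges
print(get_bin_for_equal_hist(breaks, 2.7))    # 2 -> trunc((2.7 - 0.0) / 1.0)
print(get_bin_for_equal_hist(breaks, -0.5))   # 0 -> clamped below the first edge
print(get_bin_for_equal_hist(breaks, 9.0))    # 4 -> clamped to len(breaks) - 1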
Example #20
Source File: topography.py    From typhon with MIT License
def get_native_grids(lat_min, lon_min, lat_max, lon_max):
        """
        Returns the latitude and longitude grid at native SRTM30 resolution
        that are included in the given rectangle.

        Args:
            lat_min: The latitude coordinate of the lower left corner.
            lon_min: The longitude coordinate of the lower left corner.
            lat_max: The latitude coordinate of the upper right corner.
            lon_max: The longitude coordinate of the upper right corner.
        Returns:
            Tuple :code:`(lats, lons)` of 1D-arrays containing the latitude
            and longitude coordinates of the SRTM30 data points within the
            given rectangle.
        """
        i = (90 - lat_max) / SRTM30._dlat
        i_max = np.trunc(i)
        if not i_max < i:
            i_max = i_max + 1
        i = (90 - lat_min) / SRTM30._dlat
        i_min = np.trunc(i)
        lat_grid = 90 + 0.5 * SRTM30._dlat - np.arange(i_max, i_min + 1) * SRTM30._dlat

        j = (lon_max + 180) / SRTM30._dlon
        j_max = np.trunc((lon_max + 180.0) / SRTM30._dlon)
        if not j_max < j:
            j_max = j_max - 1

        j_min = np.trunc((lon_min + 180.0) / SRTM30._dlon)
        lon_grid = -180 + 0.5 * SRTM30._dlon
        lon_grid += np.arange(j_min, j_max + 1) * SRTM30._dlon

        return lat_grid, lon_grid 
Example #21
Source File: rasterlayer.py    From Pyspatialml with GNU General Public License v3.0
def __trunc__(self):
        """Truncating to an integral using math.trunc(), i.e. math.trunc(layer)
        """

        def func(arr):
            return np.trunc(arr)

        return self._arith(func) 
Example #22
Source File: test_var.py    From attention-lvcsr with MIT License
def test_numpy_method():
    # This type of code is used frequently by PyMC3 users
    x = tt.dmatrix('x')
    data = np.random.rand(5, 5)
    x.tag.test_value = data
    for fct in [np.arccos, np.arccosh, np.arcsin, np.arcsinh,
                np.arctan, np.arctanh, np.ceil, np.cos, np.cosh, np.deg2rad,
                np.exp, np.exp2, np.expm1, np.floor, np.log,
                np.log10, np.log1p, np.log2, np.rad2deg,
                np.sin, np.sinh, np.sqrt, np.tan, np.tanh, np.trunc]:
        y = fct(x)
        f = theano.function([x], y)
        utt.assert_allclose(np.nan_to_num(f(data)),
                            np.nan_to_num(fct(data))) 
Example #23
Source File: Unit.py    From PySpice with GNU General Public License v3.0
def __trunc__(self):
        """trunc(self): Truncates self to an Integral.

        Returns an Integral i such that:
          * i>0 iff self>0;
          * abs(i) <= abs(self);
          * for any Integral j satisfying the first two conditions,
            abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
        i.e. "truncate towards 0".
        """
        raise NotImplementedError

    ############################################## 
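The stub above only documents the contract. As a hypothetical illustration (not PySpice's actual implementation), a concrete value-holding subclass could satisfy it by truncating its underlying float:

import math

class ScalarQuantity:
    """Hypothetical value holder, used only to illustrate the __trunc__ contract."""
    def __init__(self, value):
        self.value = float(value)

    def __trunc__(self):
        # Truncate towards 0: trunc(-3.7) == -3, trunc(3.7) == 3.
        return math.trunc(self.value)

print(math.trunc(ScalarQuantity(-3.7)))  # -3
print(math.trunc(ScalarQuantity(3.7)))   # 3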
Example #24
Source File: trunc.py    From mars with Apache License 2.0
def trunc(x, out=None, where=None, **kwargs):
    """
    Return the truncated value of the input, element-wise.

    The truncated value of the scalar `x` is the nearest integer `i` which
    is closer to zero than `x` is. In short, the fractional part of the
    signed number `x` is discarded.

    Parameters
    ----------
    x : array_like
        Input data.
    out : Tensor, None, or tuple of Tensor and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated tensor is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.
    where : array_like, optional
        Values of True indicate to calculate the ufunc at that position, values
        of False indicate to leave the value in the output alone.
    **kwargs

    Returns
    -------
    y : Tensor or scalar
        The truncated value of each element in `x`.

    See Also
    --------
    ceil, floor, rint

    Examples
    --------
    >>> import mars.tensor as mt

    >>> a = mt.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> mt.trunc(a).execute()
    array([-1., -1., -0.,  0.,  1.,  1.,  2.])
    """
    op = TensorTrunc(**kwargs)
    return op(x, out=out, where=where) 
Example #25
Source File: test_var.py    From D-VAE with MIT License
def test_numpy_method():
    # This type of code is used frequently by PyMC3 users
    x = tt.dmatrix('x')
    data = np.random.rand(5, 5)
    x.tag.test_value = data
    for fct in [np.arccos, np.arccosh, np.arcsin, np.arcsinh,
                np.arctan, np.arctanh, np.ceil, np.cos, np.cosh, np.deg2rad,
                np.exp, np.exp2, np.expm1, np.floor, np.log,
                np.log10, np.log1p, np.log2, np.rad2deg,
                np.sin, np.sinh, np.sqrt, np.tan, np.tanh, np.trunc]:
        y = fct(x)
        f = theano.function([x], y)
        utt.assert_allclose(np.nan_to_num(f(data)),
                            np.nan_to_num(fct(data))) 
Example #26
Source File: basic.py    From D-VAE with MIT License
def impl(self, x):
        return numpy.trunc(x) 
Example #27
Source File: cross_val.py    From vnpy_crypto with MIT License
def __init__(self, n, k):
        """
        K-Folds cross validation iterator:
        Provides train/test indexes to split data in train test sets

        Parameters
        ----------
        n: int
            Total number of elements
        k: int
            number of folds

        Examples
        --------
        >>> from scikits.learn import cross_val
        >>> X = [[1, 2], [3, 4], [1, 2], [3, 4]]
        >>> y = [1, 2, 3, 4]
        >>> kf = cross_val.KFold(4, k=2)
        >>> for train_index, test_index in kf:
        ...    print "TRAIN:", train_index, "TEST:", test_index
        ...    X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
        TRAIN: [False False  True  True] TEST: [ True  True False False]
        TRAIN: [ True  True False False] TEST: [False False  True  True]

        Notes
        -----
        All the folds have size trunc(n/k), the last one has the complementary
        """
        assert k>0, ValueError('cannot have k below 1')
        assert k<n, ValueError('cannot have k=%d greater than %d'% (k, n))
        self.n = n
        self.k = k 
Example #28
Source File: thresholding.py    From nistats with BSD 3-Clause "New" or "Revised" License
def _compute_hommel_value(z_vals, alpha, verbose=False):
    """Compute the All-Resolution Inference hommel-value"""
    if alpha < 0 or alpha > 1:
        raise ValueError('alpha should be between 0 and 1')
    z_vals_ = - np.sort(- z_vals)
    p_vals = norm.sf(z_vals_)
    n_samples = len(p_vals)

    if len(p_vals) == 1:
        return p_vals[0] > alpha
    if p_vals[0] > alpha:
        return n_samples
    slopes = (alpha - p_vals[: - 1]) / np.arange(n_samples, 1, -1)
    slope = np.max(slopes)
    hommel_value = np.trunc(n_samples + (alpha - slope * n_samples) / slope)
    if verbose:
        try:
            from matplotlib import pyplot as plt
        except ImportError:
            warnings.warn('"verbose" option requires the package Matplotlib. '
                          'Please install it using `pip install matplotlib`.')
        else:
            plt.figure()
            plt.plot(p_vals, 'o')
            plt.plot([n_samples - hommel_value, n_samples], [0, alpha])
            plt.plot([0, n_samples], [0, 0], 'k')
            plt.show(block=False)
    return np.minimum(hommel_value, n_samples) 
Example #29
Source File: report_card.py    From nussl with MIT License
def truncate(values, decs=2):
    return np.trunc(values*10**decs)/(10**decs) 
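A quick usage sketch: this truncates (rather than rounds) to a fixed number of decimal places:

import numpy as np

vals = np.array([1.2345, -9.8765])
print(truncate(vals))          # [ 1.23 -9.87]  -- digits beyond two decimals are dropped
print(truncate(vals, decs=3))  # [ 1.234 -9.876]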
Example #30
Source File: numeric_functions.py    From nufhe with GNU General Public License v3.0
def double_to_t32(d: float):
    return ((d - numpy.trunc(d)) * 2**32).astype(Torus32)
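This keeps only the fractional part of d and rescales it onto the 32-bit torus. A usage sketch, assuming Torus32 is an alias for numpy.int32 as in nufhe (the function is repeated here so the snippet is self-contained):

import numpy

Torus32 = numpy.int32  # assumption: nufhe's Torus32 alias

def double_to_t32(d: float):
    return ((d - numpy.trunc(d)) * 2**32).astype(Torus32)

print(double_to_t32(0.25))  # 1073741824 == 2**30
print(double_to_t32(1.25))  # 1073741824, only the fractional part matters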