Python numpy.finfo() Examples

The following are 30 code examples of numpy.finfo(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file via the reference above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
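
Before the project examples, a minimal orientation sketch of the finfo attributes most of them rely on (the values in the comments are the standard IEEE float32 limits):

import numpy as np

fi = np.finfo(np.float32)
print(fi.eps)    # ~1.19e-07, spacing between 1.0 and the next representable float32
print(fi.max)    # ~3.40e+38, largest finite float32
print(fi.min)    # equal to -fi.max, most negative finite float32
print(fi.tiny)   # ~1.18e-38, smallest positive normal float32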
Example #1
Source File: cartpole_bullet.py    From soccer-matlab with BSD 2-Clause "Simplified" License 9 votes
def __init__(self, renders=True):
    # start the bullet physics server
    self._renders = renders
    if (renders):
        p.connect(p.GUI)
    else:
        p.connect(p.DIRECT)

    observation_high = np.array([
          np.finfo(np.float32).max,
          np.finfo(np.float32).max,
          np.finfo(np.float32).max,
          np.finfo(np.float32).max])
    action_high = np.array([0.1])

    self.action_space = spaces.Discrete(9)
    self.observation_space = spaces.Box(-observation_high, observation_high)

    self.theta_threshold_radians = 1
    self.x_threshold = 2.4
    self._seed()
#    self.reset()
    self.viewer = None
    self._configure() 
Example #2
Source File: torch.py    From yolo2-pytorch with GNU Lesser General Public License v3.0 6 votes
def batch_iou_pair(yx_min1, yx_max1, yx_min2, yx_max2, min=float(np.finfo(np.float32).eps)):
    """
    Calculates the pairwise IoU of two lists (each of size M) of bounding boxes for N independent batches.
    :author 申瑞珉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first lists (size [N, M, 2]) of bounding boxes.
    :param yx_max1: The bottom right coordinates (y, x) of the first lists (size [N, M, 2]) of bounding boxes.
    :param yx_min2: The top left coordinates (y, x) of the second lists (size [N, M, 2]) of bounding boxes.
    :param yx_max2: The bottom right coordinates (y, x) of the second lists (size [N, M, 2]) of bounding boxes.
    :return: The lists (size [N, M]) of the IoU.
    """
    yx_min = torch.max(yx_min1, yx_min2)
    yx_max = torch.min(yx_max1, yx_max2)
    size = torch.clamp(yx_max - yx_min, min=0)
    intersect_area = torch.prod(size, -1)
    area1 = torch.prod(yx_max1 - yx_min1, -1)
    area2 = torch.prod(yx_max2 - yx_min2, -1)
    union_area = torch.clamp(area1 + area2 - intersect_area, min=min)
    return intersect_area / union_area 
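
A quick usage sketch for the function above (hedged: it assumes PyTorch is available and batch_iou_pair is importable from the module). The random boxes are built so each minimum corner never exceeds its maximum corner:

import torch

N, M = 2, 3
yx_min1 = torch.rand(N, M, 2)
yx_max1 = yx_min1 + torch.rand(N, M, 2)   # ensures yx_min1 <= yx_max1
yx_min2 = torch.rand(N, M, 2)
yx_max2 = yx_min2 + torch.rand(N, M, 2)
iou = batch_iou_pair(yx_min1, yx_max1, yx_min2, yx_max2)
print(iou.shape)                          # torch.Size([2, 3]): one IoU per paired box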
Example #3
Source File: test_nifti1.py    From me-ica with GNU Lesser General Public License v2.1 6 votes
def test_rt_bias():
    # Check for bias in round trip
    # Parallel test to arraywriters
    rng = np.random.RandomState(20111214)
    mu, std, count = 100, 10, 100
    arr = rng.normal(mu, std, size=(count,))
    eps = np.finfo(np.float32).eps
    aff = np.eye(4)
    for in_dt in (np.float32, np.float64):
        arr_t = arr.astype(in_dt)
        for out_dt in IUINT_TYPES:
            img = Nifti1Image(arr_t, aff)
            img_back = round_trip(img)
            arr_back_sc = img_back.get_data()
            slope, inter = img_back.get_header().get_slope_inter()
            bias = np.mean(arr_t - arr_back_sc)
            # Get estimate for error
            max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter)
            # Hokey use of max_miss as a std estimate
            bias_thresh = np.max([max_miss / np.sqrt(count), eps])
            assert_true(np.abs(bias) < bias_thresh) 
Example #4
Source File: test_utils.py    From recruit with Apache License 2.0 6 votes
def test_complex128_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        # The test condition needs to be at least a factor of sqrt(2) smaller
        # because the real and imaginary parts both change
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp) 
Example #5
Source File: test_floating.py    From me-ica with GNU Lesser General Public License v2.1 6 votes
def test_check_nmant_nexp():
    # Routine for checking the number of significand digits and the exponent
    for t in IEEE_floats:
        nmant = np.finfo(t).nmant
        maxexp = np.finfo(t).maxexp
        assert_true(_check_nmant(t, nmant))
        assert_false(_check_nmant(t, nmant - 1))
        assert_false(_check_nmant(t, nmant + 1))
        assert_true(_check_maxexp(t, maxexp))
        assert_false(_check_maxexp(t, maxexp - 1))
        assert_false(_check_maxexp(t, maxexp + 1))
    # Check against type_info
    for t in ok_floats():
        ti = type_info(t)
        if ti['nmant'] != 106: # This check does not work for PPC double pair
            assert_true(_check_nmant(t, ti['nmant']))
        assert_true(_check_maxexp(t, ti['maxexp'])) 
Example #6
Source File: test_utils.py    From recruit with Apache License 2.0 6 votes
def test_complex64_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp) 
Example #7
Source File: test_arraywriters.py    From me-ica with GNU Lesser General Public License v2.1 6 votes
def test_int_int_slope():
    # Conversion between (u)int and (u)int for slopes only
    eps = np.finfo(np.float64).eps
    rtol = 1e-7
    for in_dt in IUINT_TYPES:
        iinf = np.iinfo(in_dt)
        for out_dt in IUINT_TYPES:
            kinds = np.dtype(in_dt).kind + np.dtype(out_dt).kind
            if kinds in ('ii', 'uu', 'ui'):
                arrs = (np.array([iinf.min, iinf.max], dtype=in_dt),)
            elif kinds == 'iu':
                arrs = (np.array([iinf.min, 0], dtype=in_dt),
                        np.array([0, iinf.max], dtype=in_dt))
            for arr in arrs:
                try:
                    aw = SlopeArrayWriter(arr, out_dt)
                except ScalingError:
                    continue
                assert_false(aw.slope == 0)
                arr_back_sc = round_trip(aw)
                # integer allclose
                adiff = int_abs(arr - arr_back_sc)
                rdiff = adiff / (arr + eps)
                assert_true(np.all(rdiff < rtol)) 
Example #8
Source File: test_arraywriters.py    From me-ica with GNU Lesser General Public License v2.1 6 votes
def test_int_int_min_max():
    # Conversion between (u)int and (u)int
    eps = np.finfo(np.float64).eps
    rtol = 1e-6
    for in_dt in IUINT_TYPES:
        iinf = np.iinfo(in_dt)
        arr = np.array([iinf.min, iinf.max], dtype=in_dt)
        for out_dt in IUINT_TYPES:
            try:
                aw = SlopeInterArrayWriter(arr, out_dt)
            except ScalingError:
                continue
            arr_back_sc = round_trip(aw)
            # integer allclose
            adiff = int_abs(arr - arr_back_sc)
            rdiff = adiff / (arr + eps)
            assert_true(np.all(rdiff < rtol)) 
Example #9
Source File: test_utils.py    From recruit with Apache License 2.0 6 votes
def test_float64_pass(self):
        # The number of units of least precision
        # In this case, use a few places above the lowest level (ie nulp=1)
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]

        # Addition
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

        # Subtraction
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp) 
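
For reference, eps and epsneg are the spacings immediately above and below 1.0, which is why the test uses eps for the upward perturbation and epsneg for the downward one; a minimal check:

import numpy as np

fi = np.finfo(np.float64)
assert np.float64(1.0) + fi.eps > 1.0      # eps: gap between 1.0 and the next larger double
assert np.float64(1.0) - fi.epsneg < 1.0   # epsneg: gap between 1.0 and the next smaller double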
Example #10
Source File: test_numeric.py    From recruit with Apache License 2.0 6 votes
def test_can_cast_values(self):
        # gh-5917
        for dt in np.sctypes['int'] + np.sctypes['uint']:
            ii = np.iinfo(dt)
            assert_(np.can_cast(ii.min, dt))
            assert_(np.can_cast(ii.max, dt))
            assert_(not np.can_cast(ii.min - 1, dt))
            assert_(not np.can_cast(ii.max + 1, dt))

        for dt in np.sctypes['float']:
            fi = np.finfo(dt)
            assert_(np.can_cast(fi.min, dt))
            assert_(np.can_cast(fi.max, dt))
Example #11
Source File: tsd_net.py    From ConvLab with MIT License 6 votes
def finish_episode(self, log_probas, saved_rewards):
        R = 0
        policy_loss = []
        rewards = []
        for r in saved_rewards:
            R = r + 0.8 * R
            rewards.insert(0, R)

        rewards = torch.Tensor(rewards)
        # update: we notice improved performance without reward normalization
        # rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)

        for log_prob, reward in zip(log_probas, rewards):
            policy_loss.append((-log_prob * reward).unsqueeze(0))
        l = len(policy_loss)
        policy_loss = torch.cat(policy_loss).sum()
        return policy_loss / l 
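
The commented-out line is the usual reward-normalization trick; the finfo eps term is there only to keep the division finite when a batch of rewards has zero variance. A standalone sketch of that edge case:

import numpy as np
import torch

rewards = torch.tensor([1.0, 1.0, 1.0])   # zero-variance batch: std() is 0
normalized = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
print(normalized)                         # tensor([0., 0., 0.]) instead of NaN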
Example #12
Source File: test_linalg.py    From recruit with Apache License 2.0 6 votes
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #13
Source File: mutual_information.py    From NeuroKit with MIT License 6 votes
def _mutual_information_varoquaux(x, y, bins=256, sigma=1, normalized=True):
    """Based on Gael Varoquaux's implementation: https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429."""
    jh = np.histogram2d(x, y, bins=bins)[0]

    # smooth the jh with a gaussian filter of given sigma
    scipy.ndimage.gaussian_filter(jh, sigma=sigma, mode="constant", output=jh)

    # compute marginal histograms
    jh = jh + np.finfo(float).eps
    sh = np.sum(jh)
    jh = jh / sh
    s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))
    s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))

    if normalized:
        mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2))) / np.sum(jh * np.log(jh))) - 1
    else:
        mi = np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1)) - np.sum(s2 * np.log(s2))

    return mi 
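
A hedged usage sketch (it assumes numpy and scipy.ndimage are imported at module level, as the function body implies): correlated signals should score noticeably higher than independent ones, and the eps added to the joint histogram keeps the log terms finite for empty bins.

import numpy as np
import scipy.ndimage

rng = np.random.RandomState(0)
x = rng.normal(size=1000)
y = x + 0.1 * rng.normal(size=1000)                              # strongly correlated with x
print(_mutual_information_varoquaux(x, y))                       # clearly positive
print(_mutual_information_varoquaux(x, rng.normal(size=1000)))   # close to 0 for independent data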
Example #14
Source File: test_histograms.py    From recruit with Apache License 2.0 6 votes
def do_precision_upper_bound(self, float_small, float_large):
        eps = np.finfo(float_large).eps

        arr = np.array([1.0], float_small)
        range = np.array([0.0, 1.0 - eps], float_large)

        # test is looking for behavior when the bounds change between dtypes
        if range.astype(float_small)[-1] != 1:
            return

        # previously crashed
        count, x_loc = np.histogram(arr, bins=1, range=range)
        assert_equal(count, [1])

        # gh-10322 means that the type comes from arr - this may change
        assert_equal(x_loc.dtype, float_small) 
Example #15
Source File: test_histograms.py    From recruit with Apache License 2.0 6 votes
def do_precision_lower_bound(self, float_small, float_large):
        eps = np.finfo(float_large).eps

        arr = np.array([1.0], float_small)
        range = np.array([1.0 + eps, 2.0], float_large)

        # test is looking for behavior when the bounds change between dtypes
        if range.astype(float_small)[0] != 1:
            return

        # previously crashed
        count, x_loc = np.histogram(arr, bins=1, range=range)
        assert_equal(count, [1])

        # gh-10322 means that the type comes from arr - this may change
        assert_equal(x_loc.dtype, float_small) 
Example #16
Source File: tiles.py    From argus-freesound with MIT License 6 votes
def merge(self, tiles: List[np.ndarray], dtype=np.float32):
        if len(tiles) != len(self.crops):
            raise ValueError

        channels = 1 if len(tiles[0].shape) == 2 else tiles[0].shape[2]
        target_shape = self.image_height + self.margin_bottom + self.margin_top, self.image_width + self.margin_right + self.margin_left, channels

        image = np.zeros(target_shape, dtype=np.float64)
        norm_mask = np.zeros(target_shape, dtype=np.float64)

        w = np.dstack([self.weight] * channels)

        for tile, (x, y, tile_width, tile_height) in zip(tiles, self.crops):
            # print(x, y, tile_width, tile_height, image.shape)
            image[y:y + tile_height, x:x + tile_width] += tile * w
            norm_mask[y:y + tile_height, x:x + tile_width] += w

        # print(norm_mask.min(), norm_mask.max())
        norm_mask = np.clip(norm_mask, a_min=np.finfo(norm_mask.dtype).eps, a_max=None)
        normalized = np.divide(image, norm_mask).astype(dtype)
        crop = self.crop_to_orignal_size(normalized)
        return crop 
Example #17
Source File: numpy.py    From yolo2-pytorch with GNU Lesser General Public License v3.0 6 votes
def iou(yx_min1, yx_max1, yx_min2, yx_max2, min=None):
    """
    Calculates the IoU of two bounding boxes.
    :author 申瑞珉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first bounding box.
    :param yx_max1: The bottom right coordinates (y, x) of the first bounding box.
    :param yx_min2: The top left coordinates (y, x) of the second bounding box.
    :param yx_max2: The bottom right coordinates (y, x) of the second bounding box.
    :return: The IoU.
    """
    assert np.all(yx_min1 <= yx_max1)
    assert np.all(yx_min2 <= yx_max2)
    if min is None:
        min = np.finfo(yx_min1.dtype).eps
    yx_min = np.maximum(yx_min1, yx_min2)
    yx_max = np.minimum(yx_max1, yx_max2)
    intersect_area = np.multiply.reduce(np.maximum(0.0, yx_max - yx_min))
    area1 = np.multiply.reduce(yx_max1 - yx_min1)
    area2 = np.multiply.reduce(yx_max2 - yx_min2)
    assert np.all(intersect_area >= 0)
    assert np.all(intersect_area <= area1)
    assert np.all(intersect_area <= area2)
    union_area = np.maximum(area1 + area2 - intersect_area, min)
    return intersect_area / union_area 
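
A small worked example for the function above (hedged: only NumPy and the iou function are assumed). Two 2x2 boxes overlapping in a 1x1 square give IoU = 1 / (4 + 4 - 1) ≈ 0.143:

import numpy as np

yx_min1, yx_max1 = np.array([0.0, 0.0]), np.array([2.0, 2.0])
yx_min2, yx_max2 = np.array([1.0, 1.0]), np.array([3.0, 3.0])
print(iou(yx_min1, yx_max1, yx_min2, yx_max2))   # ~0.1429: intersection 1, union 7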
Example #18
Source File: numpy.py    From yolo2-pytorch with GNU Lesser General Public License v3.0 6 votes
def iou_matrix(yx_min1, yx_max1, yx_min2, yx_max2, min=None):
    """
    Calculates the IoU of two lists of bounding boxes.
    :author 申瑞珉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first list (size [N1, 2]) of bounding boxes.
    :param yx_max1: The bottom right coordinates (y, x) of the first list (size [N1, 2]) of bounding boxes.
    :param yx_min2: The top left coordinates (y, x) of the second list (size [N2, 2]) of bounding boxes.
    :param yx_max2: The bottom right coordinates (y, x) of the second list (size [N2, 2]) of bounding boxes.
    :return: The matrix (size [N1, N2]) of the IoU.
    """
    if min is None:
        min = np.finfo(yx_min1.dtype).eps
    assert np.all(yx_min1 <= yx_max1)
    assert np.all(yx_min2 <= yx_max2)
    intersect_area = intersection_area(yx_min1, yx_max1, yx_min2, yx_max2)
    area1 = np.expand_dims(np.multiply.reduce(yx_max1 - yx_min1, -1), 1)
    area2 = np.expand_dims(np.multiply.reduce(yx_max2 - yx_min2, -1), 0)
    assert np.all(intersect_area >= 0)
    assert np.all(intersect_area <= area1)
    assert np.all(intersect_area <= area2)
    union_area = np.maximum(area1 + area2 - intersect_area, min)
    return intersect_area / union_area 
Example #19
Source File: SpectralClustering.py    From sparse-subspace-clustering-python with MIT License 6 votes
def SpectralClustering(CKSym, n):
    # This is a direct port of JHU vision lab code. Could probably use sklearn SpectralClustering.
    CKSym = CKSym.astype(float)
    N, _ = CKSym.shape
    MAXiter = 1000  # Maximum number of iterations for KMeans
    REPlic = 20  # Number of replications for KMeans

    DN = np.diag(np.divide(1, np.sqrt(np.sum(CKSym, axis=0) + np.finfo(float).eps)))
    LapN = identity(N).toarray().astype(float) - np.matmul(np.matmul(DN, CKSym), DN)
    _, _, vN = np.linalg.svd(LapN)
    vN = vN.T
    kerN = vN[:, N - n:N]
    normN = np.sqrt(np.sum(np.square(kerN), axis=1))
    kerNS = np.divide(kerN, normN.reshape(len(normN), 1) + np.finfo(float).eps)
    km = KMeans(n_clusters=n, n_init=REPlic, max_iter=MAXiter, n_jobs=-1).fit(kerNS)
    return km.labels_ 
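
A hedged usage sketch (the module is assumed to import identity from scipy.sparse and KMeans from sklearn.cluster, as the body implies; note that the n_jobs argument was removed from KMeans in newer scikit-learn releases): a block-structured affinity matrix should recover its two blocks.

import numpy as np

CKSym = np.zeros((6, 6))
CKSym[:3, :3] = 1.0        # first group of mutually similar points
CKSym[3:, 3:] = 1.0        # second group
labels = SpectralClustering(CKSym, 2)
print(labels)              # two clusters, e.g. [0 0 0 1 1 1] (label order may differ)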
Example #20
Source File: test_umath.py    From recruit with Apache License 2.0 6 votes
def test_against_cmath(self):
        import cmath

        points = [-1-1j, -1+1j, +1-1j, +1+1j]
        name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
                    'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
        atol = 4*np.finfo(complex).eps
        for func in self.funcs:
            fname = func.__name__.split('.')[-1]
            cname = name_map.get(fname, fname)
            try:
                cfunc = getattr(cmath, cname)
            except AttributeError:
                continue
            for p in points:
                a = complex(func(np.complex_(p)))
                b = cfunc(p)
                assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b)) 
Example #21
Source File: utils.py    From reconstructing_faces_from_voices with GNU General Public License v3.0 6 votes
def get_fbank(voice, mfc_obj):
    # Extract log mel-spectrogram
    fbank = mfc_obj.sig2logspec(voice).astype('float32')

    # Mean and variance normalization of each mel-frequency band
    fbank = fbank - fbank.mean(axis=0)
    fbank = fbank / (fbank.std(axis=0)+np.finfo(np.float32).eps)

    # If the duration of a voice recording is less than 10 seconds (1000 frames),
    # repeat the recording until it is longer than 10 seconds and crop.
    full_frame_number = 1000
    init_frame_number = fbank.shape[0]
    while fbank.shape[0] < full_frame_number:
        fbank = np.append(fbank, fbank[0:init_frame_number], axis=0)
        fbank = fbank[0:full_frame_number, :]
    return fbank 
Example #22
Source File: test_getlimits.py    From recruit with Apache License 2.0 5 votes
def test_singleton(self):
        ftype = finfo(double)
        ftype2 = finfo(double)
        assert_equal(id(ftype), id(ftype2)) 
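
What this test pins down is that np.finfo instances are cached per type, so repeated construction is cheap; a one-line check:

import numpy as np

assert np.finfo(np.float64) is np.finfo(np.float64)   # the same cached instance is returned every time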
Example #23
Source File: test_core.py    From recruit with Apache License 2.0 5 votes
def test_limits_arithmetic(self):
        tiny = np.finfo(float).tiny
        a = array([tiny, 1. / tiny, 0.])
        assert_equal(getmaskarray(a / 2), [0, 0, 0])
        assert_equal(getmaskarray(2 / a), [1, 0, 1]) 
Example #24
Source File: test_utils.py    From recruit with Apache License 2.0 5 votes
def test_double(self):
        # Generate 1 + small deviation, check that adding eps gives a few UNL
        x = np.ones(10).astype(np.float64)
        x += 0.01 * np.random.randn(10).astype(np.float64)
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, x+eps, maxulp=200) 
Example #25
Source File: train.py    From yolo2-pytorch with GNU Lesser General Public License v3.0 5 votes
def backup_best(self, cls_ap, path):
        try:
            with open(self.model_dir + '.pkl', 'rb') as f:
                best = np.mean(list(pickle.load(f).values()))
        except:
            best = np.finfo(np.float32).min
        metric = np.mean(list(cls_ap.values()))
        if metric > best:
            with open(self.model_dir + '.pkl', 'wb') as f:
                pickle.dump(cls_ap, f)
            shutil.copy(path, self.model_dir + '.pth')
            logging.info('best model (%f) saved into %s.*' % (metric, self.model_dir))
        else:
            logging.info('best metric %f >= %f' % (best, metric)) 
Example #26
Source File: test_utils.py    From recruit with Apache License 2.0 5 votes
def test_float16_fail(self):
        nulp = 5
        x = np.linspace(-4, 4, 10, dtype=np.float16)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp) 
Example #27
Source File: test_utils.py    From recruit with Apache License 2.0 5 votes
def test_float32_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp) 
Example #28
Source File: test_utils.py    From recruit with Apache License 2.0 5 votes
def test_float32_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp) 
Example #29
Source File: test_utils.py    From recruit with Apache License 2.0 5 votes
def test_float64_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp) 
Example #30
Source File: test_random.py    From recruit with Apache License 2.0 5 votes
def test_uniform_range_bounds(self):
        fmin = np.finfo('float').min
        fmax = np.finfo('float').max

        func = np.random.uniform
        assert_raises(OverflowError, func, -np.inf, 0)
        assert_raises(OverflowError, func,  0,      np.inf)
        assert_raises(OverflowError, func,  fmin,   fmax)
        assert_raises(OverflowError, func, [-np.inf], [0])
        assert_raises(OverflowError, func, [0], [np.inf])

        # (fmax / 1e17) - fmin is within range, so this should not throw
        # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
        # DBL_MAX by increasing fmin a bit
        np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
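
The OverflowError assertions follow from the fact that the full float64 range cannot itself be represented as a float64; a quick check:

import numpy as np

fmin, fmax = np.finfo('float').min, np.finfo('float').max
print(fmax - fmin)   # inf: high - low overflows, which is why uniform() must reject the fmin..fmax range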