Python numpy.finfo() Examples
The following are 30 code examples showing how to use numpy.finfo(). They are extracted from open-source projects; the project, author, source file, and license are listed above each example.
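Before the examples, a quick orientation may help: numpy.finfo() returns a machine-limits object for a floating-point type, and the examples below mostly read a handful of its attributes. The short sketch that follows is illustrative only (it is not taken from any of the listed projects); all attributes shown are documented NumPy ones.

import numpy as np

# Machine limits for float32; np.finfo also accepts dtype strings such as 'float32'
# or a concrete float instance.
fi = np.finfo(np.float32)
print(fi.eps)     # smallest step above 1.0, about 1.19e-07 for float32
print(fi.max)     # largest finite value, about 3.40e+38
print(fi.min)     # most negative finite value
print(fi.tiny)    # smallest positive normal value
print(fi.nmant)   # bits in the significand: 23
print(fi.maxexp)  # smallest power of 2 that overflows: 128

# The two patterns that dominate the examples below:
x = np.array([0.0, 1.0, 2.0], dtype=np.float32)
safe = x / (x.sum() + np.finfo(np.float32).eps)  # eps keeps divisions and logs away from zero
best_so_far = np.finfo(np.float32).max           # max as the starting value of a running minimum

As the examples show, finfo(...).eps is the most common attribute in practice, with max and min used for sentinels, clipping, and observation bounds.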
Example 1
Project: soccer-matlab Author: utra-robosoccer File: cartpole_bullet.py License: BSD 2-Clause "Simplified" License | 7 votes |
def __init__(self, renders=True):
    # start the bullet physics server
    self._renders = renders
    if (renders):
        p.connect(p.GUI)
    else:
        p.connect(p.DIRECT)

    observation_high = np.array([
        np.finfo(np.float32).max,
        np.finfo(np.float32).max,
        np.finfo(np.float32).max,
        np.finfo(np.float32).max])
    action_high = np.array([0.1])

    self.action_space = spaces.Discrete(9)
    self.observation_space = spaces.Box(-observation_high, observation_high)

    self.theta_threshold_radians = 1
    self.x_threshold = 2.4
    self._seed()
    # self.reset()
    self.viewer = None
    self._configure()
Example 2
Project: sparse-subspace-clustering-python Author: abhinav4192 File: SpectralClustering.py License: MIT License | 6 votes |
def SpectralClustering(CKSym, n):
    # This is direct port of JHU vision lab code. Could probably use sklearn SpectralClustering.
    CKSym = CKSym.astype(float)
    N, _ = CKSym.shape
    MAXiter = 1000  # Maximum number of iterations for KMeans
    REPlic = 20  # Number of replications for KMeans

    DN = np.diag(np.divide(1, np.sqrt(np.sum(CKSym, axis=0) + np.finfo(float).eps)))
    LapN = identity(N).toarray().astype(float) - np.matmul(np.matmul(DN, CKSym), DN)
    _, _, vN = np.linalg.svd(LapN)
    vN = vN.T
    kerN = vN[:, N - n:N]
    normN = np.sqrt(np.sum(np.square(kerN), axis=1))
    kerNS = np.divide(kerN, normN.reshape(len(normN), 1) + np.finfo(float).eps)
    km = KMeans(n_clusters=n, n_init=REPlic, max_iter=MAXiter, n_jobs=-1).fit(kerNS)
    return km.labels_
Example 3
Project: argus-freesound Author: lRomul File: tiles.py License: MIT License | 6 votes |
def merge(self, tiles: List[np.ndarray], dtype=np.float32):
    if len(tiles) != len(self.crops):
        raise ValueError

    channels = 1 if len(tiles[0].shape) == 2 else tiles[0].shape[2]
    target_shape = (
        self.image_height + self.margin_bottom + self.margin_top,
        self.image_width + self.margin_right + self.margin_left,
        channels,
    )

    image = np.zeros(target_shape, dtype=np.float64)
    norm_mask = np.zeros(target_shape, dtype=np.float64)
    w = np.dstack([self.weight] * channels)

    for tile, (x, y, tile_width, tile_height) in zip(tiles, self.crops):
        # print(x, y, tile_width, tile_height, image.shape)
        image[y:y + tile_height, x:x + tile_width] += tile * w
        norm_mask[y:y + tile_height, x:x + tile_width] += w

    # print(norm_mask.min(), norm_mask.max())
    norm_mask = np.clip(norm_mask, a_min=np.finfo(norm_mask.dtype).eps, a_max=None)
    normalized = np.divide(image, norm_mask).astype(dtype)
    crop = self.crop_to_orignal_size(normalized)
    return crop
Example 4
Project: NeuroKit Author: neuropsychology File: mutual_information.py License: MIT License | 6 votes |
def _mutual_information_varoquaux(x, y, bins=256, sigma=1, normalized=True):
    """Based on Gael Varoquaux's implementation: https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429."""
    jh = np.histogram2d(x, y, bins=bins)[0]

    # smooth the jh with a gaussian filter of given sigma
    scipy.ndimage.gaussian_filter(jh, sigma=sigma, mode="constant", output=jh)

    # compute marginal histograms
    jh = jh + np.finfo(float).eps
    sh = np.sum(jh)
    jh = jh / sh
    s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))
    s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))

    if normalized:
        mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2))) / np.sum(jh * np.log(jh))) - 1
    else:
        mi = np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1)) - np.sum(s2 * np.log(s2))

    return mi
Example 5
Project: ConvLab Author: ConvLab File: tsd_net.py License: MIT License | 6 votes |
def finish_episode(self, log_probas, saved_rewards):
    R = 0
    policy_loss = []
    rewards = []
    for r in saved_rewards:
        R = r + 0.8 * R
        rewards.insert(0, R)
    rewards = torch.Tensor(rewards)
    # update: we notice improved performance without reward normalization
    # rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
    for log_prob, reward in zip(log_probas, rewards):
        policy_loss.append((-log_prob * reward).unsqueeze(0))
    l = len(policy_loss)
    policy_loss = torch.cat(policy_loss).sum()
    return policy_loss / l
Example 6
Project: me-ica Author: ME-ICA File: test_arraywriters.py License: GNU Lesser General Public License v2.1 | 6 votes |
def test_int_int_min_max():
    # Conversion between (u)int and (u)int
    eps = np.finfo(np.float64).eps
    rtol = 1e-6
    for in_dt in IUINT_TYPES:
        iinf = np.iinfo(in_dt)
        arr = np.array([iinf.min, iinf.max], dtype=in_dt)
        for out_dt in IUINT_TYPES:
            try:
                aw = SlopeInterArrayWriter(arr, out_dt)
            except ScalingError:
                continue
            arr_back_sc = round_trip(aw)
            # integer allclose
            adiff = int_abs(arr - arr_back_sc)
            rdiff = adiff / (arr + eps)
            assert_true(np.all(rdiff < rtol))
Example 7
Project: me-ica Author: ME-ICA File: test_arraywriters.py License: GNU Lesser General Public License v2.1 | 6 votes |
def test_int_int_slope():
    # Conversion between (u)int and (u)int for slopes only
    eps = np.finfo(np.float64).eps
    rtol = 1e-7
    for in_dt in IUINT_TYPES:
        iinf = np.iinfo(in_dt)
        for out_dt in IUINT_TYPES:
            kinds = np.dtype(in_dt).kind + np.dtype(out_dt).kind
            if kinds in ('ii', 'uu', 'ui'):
                arrs = (np.array([iinf.min, iinf.max], dtype=in_dt),)
            elif kinds == 'iu':
                arrs = (np.array([iinf.min, 0], dtype=in_dt),
                        np.array([0, iinf.max], dtype=in_dt))
            for arr in arrs:
                try:
                    aw = SlopeArrayWriter(arr, out_dt)
                except ScalingError:
                    continue
                assert_false(aw.slope == 0)
                arr_back_sc = round_trip(aw)
                # integer allclose
                adiff = int_abs(arr - arr_back_sc)
                rdiff = adiff / (arr + eps)
                assert_true(np.all(rdiff < rtol))
Example 8
Project: me-ica Author: ME-ICA File: test_floating.py License: GNU Lesser General Public License v2.1 | 6 votes |
def test_check_nmant_nexp():
    # Routine for checking number of significand digits and exponent
    for t in IEEE_floats:
        nmant = np.finfo(t).nmant
        maxexp = np.finfo(t).maxexp
        assert_true(_check_nmant(t, nmant))
        assert_false(_check_nmant(t, nmant - 1))
        assert_false(_check_nmant(t, nmant + 1))
        assert_true(_check_maxexp(t, maxexp))
        assert_false(_check_maxexp(t, maxexp - 1))
        assert_false(_check_maxexp(t, maxexp + 1))
    # Check against type_info
    for t in ok_floats():
        ti = type_info(t)
        if ti['nmant'] != 106:  # This check does not work for PPC double pair
            assert_true(_check_nmant(t, ti['nmant']))
        assert_true(_check_maxexp(t, ti['maxexp']))
Example 9
Project: me-ica Author: ME-ICA File: test_nifti1.py License: GNU Lesser General Public License v2.1 | 6 votes |
def test_rt_bias():
    # Check for bias in round trip
    # Parallel test to arraywriters
    rng = np.random.RandomState(20111214)
    mu, std, count = 100, 10, 100
    arr = rng.normal(mu, std, size=(count,))
    eps = np.finfo(np.float32).eps
    aff = np.eye(4)
    for in_dt in (np.float32, np.float64):
        arr_t = arr.astype(in_dt)
        for out_dt in IUINT_TYPES:
            img = Nifti1Image(arr_t, aff)
            img_back = round_trip(img)
            arr_back_sc = img_back.get_data()
            slope, inter = img_back.get_header().get_slope_inter()
            bias = np.mean(arr_t - arr_back_sc)
            # Get estimate for error
            max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter)
            # Hokey use of max_miss as a std estimate
            bias_thresh = np.max([max_miss / np.sqrt(count), eps])
            assert_true(np.abs(bias) < bias_thresh)
Example 10
Project: yolo2-pytorch Author: ruiminshen File: numpy.py License: GNU Lesser General Public License v3.0 | 6 votes |
def iou(yx_min1, yx_max1, yx_min2, yx_max2, min=None):
    """
    Calculates the IoU of two bounding boxes.
    :author 申瑞珉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first bounding box.
    :param yx_max1: The bottom right coordinates (y, x) of the first bounding box.
    :param yx_min2: The top left coordinates (y, x) of the second bounding box.
    :param yx_max2: The bottom right coordinates (y, x) of the second bounding box.
    :return: The IoU.
    """
    assert np.all(yx_min1 <= yx_max1)
    assert np.all(yx_min2 <= yx_max2)
    if min is None:
        min = np.finfo(yx_min1.dtype).eps
    yx_min = np.maximum(yx_min1, yx_min2)
    yx_max = np.minimum(yx_max1, yx_max2)
    intersect_area = np.multiply.reduce(np.maximum(0.0, yx_max - yx_min))
    area1 = np.multiply.reduce(yx_max1 - yx_min1)
    area2 = np.multiply.reduce(yx_max2 - yx_min2)
    assert np.all(intersect_area >= 0)
    assert np.all(intersect_area <= area1)
    assert np.all(intersect_area <= area2)
    union_area = np.maximum(area1 + area2 - intersect_area, min)
    return intersect_area / union_area
Example 11
Project: yolo2-pytorch Author: ruiminshen File: numpy.py License: GNU Lesser General Public License v3.0 | 6 votes |
def iou_matrix(yx_min1, yx_max1, yx_min2, yx_max2, min=None):
    """
    Calculates the IoU of two lists of bounding boxes.
    :author 申瑞珉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first list (size [N1, 2]) of bounding boxes.
    :param yx_max1: The bottom right coordinates (y, x) of the first list (size [N1, 2]) of bounding boxes.
    :param yx_min2: The top left coordinates (y, x) of the second list (size [N2, 2]) of bounding boxes.
    :param yx_max2: The bottom right coordinates (y, x) of the second list (size [N2, 2]) of bounding boxes.
    :return: The matrix (size [N1, N2]) of the IoU.
    """
    if min is None:
        min = np.finfo(yx_min1.dtype).eps
    assert np.all(yx_min1 <= yx_max1)
    assert np.all(yx_min2 <= yx_max2)
    intersect_area = intersection_area(yx_min1, yx_max1, yx_min2, yx_max2)
    area1 = np.expand_dims(np.multiply.reduce(yx_max1 - yx_min1, -1), 1)
    area2 = np.expand_dims(np.multiply.reduce(yx_max2 - yx_min2, -1), 0)
    assert np.all(intersect_area >= 0)
    assert np.all(intersect_area <= area1)
    assert np.all(intersect_area <= area2)
    union_area = np.maximum(area1 + area2 - intersect_area, min)
    return intersect_area / union_area
Example 12
Project: yolo2-pytorch Author: ruiminshen File: torch.py License: GNU Lesser General Public License v3.0 | 6 votes |
def batch_iou_pair(yx_min1, yx_max1, yx_min2, yx_max2, min=float(np.finfo(np.float32).eps)):
    """
    Pairwisely calculates the IoU of two lists (at the same size M) of bounding boxes for N independent batches.
    :author 申瑞珉 (Ruimin Shen)
    :param yx_min1: The top left coordinates (y, x) of the first lists (size [N, M, 2]) of bounding boxes.
    :param yx_max1: The bottom right coordinates (y, x) of the first lists (size [N, M, 2]) of bounding boxes.
    :param yx_min2: The top left coordinates (y, x) of the second lists (size [N, M, 2]) of bounding boxes.
    :param yx_max2: The bottom right coordinates (y, x) of the second lists (size [N, M, 2]) of bounding boxes.
    :return: The lists (size [N, M]) of the IoU.
    """
    yx_min = torch.max(yx_min1, yx_min2)
    yx_max = torch.min(yx_max1, yx_max2)
    size = torch.clamp(yx_max - yx_min, min=0)
    intersect_area = torch.prod(size, -1)
    area1 = torch.prod(yx_max1 - yx_min1, -1)
    area2 = torch.prod(yx_max2 - yx_min2, -1)
    union_area = torch.clamp(area1 + area2 - intersect_area, min=min)
    return intersect_area / union_area
Example 13
Project: reconstructing_faces_from_voices Author: cmu-mlsp File: utils.py License: GNU General Public License v3.0 | 6 votes |
def get_fbank(voice, mfc_obj):
    # Extract log mel-spectrogram
    fbank = mfc_obj.sig2logspec(voice).astype('float32')

    # Mean and variance normalization of each mel-frequency band
    fbank = fbank - fbank.mean(axis=0)
    fbank = fbank / (fbank.std(axis=0) + np.finfo(np.float32).eps)

    # If the duration of a voice recording is less than 10 seconds (1000 frames),
    # repeat the recording until it is longer than 10 seconds and crop.
    full_frame_number = 1000
    init_frame_number = fbank.shape[0]
    while fbank.shape[0] < full_frame_number:
        fbank = np.append(fbank, fbank[0:init_frame_number], axis=0)
        fbank = fbank[0:full_frame_number, :]
    return fbank
Example 14
Project: recruit Author: Frank-qlu File: test_histograms.py License: Apache License 2.0 | 6 votes |
def do_precision_lower_bound(self, float_small, float_large):
    eps = np.finfo(float_large).eps
    arr = np.array([1.0], float_small)
    range = np.array([1.0 + eps, 2.0], float_large)

    # test is looking for behavior when the bounds change between dtypes
    if range.astype(float_small)[0] != 1:
        return
    # previously crashed
    count, x_loc = np.histogram(arr, bins=1, range=range)
    assert_equal(count, [1])
    # gh-10322 means that the type comes from arr - this may change
    assert_equal(x_loc.dtype, float_small)
Example 15
Project: recruit Author: Frank-qlu File: test_histograms.py License: Apache License 2.0 | 6 votes |
def do_precision_upper_bound(self, float_small, float_large):
    eps = np.finfo(float_large).eps
    arr = np.array([1.0], float_small)
    range = np.array([0.0, 1.0 - eps], float_large)

    # test is looking for behavior when the bounds change between dtypes
    if range.astype(float_small)[-1] != 1:
        return
    # previously crashed
    count, x_loc = np.histogram(arr, bins=1, range=range)
    assert_equal(count, [1])
    # gh-10322 means that the type comes from arr - this may change
    assert_equal(x_loc.dtype, float_small)
Example 16
Project: recruit Author: Frank-qlu File: test_linalg.py License: Apache License 2.0 | 6 votes |
def test_basic_property(self):
    # Check A = L L^H
    shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
    dtypes = (np.float32, np.float64, np.complex64, np.complex128)

    for shape, dtype in itertools.product(shapes, dtypes):
        np.random.seed(1)
        a = np.random.randn(*shape)
        if np.issubdtype(dtype, np.complexfloating):
            a = a + 1j*np.random.randn(*shape)

        t = list(range(len(shape)))
        t[-2:] = -1, -2

        a = np.matmul(a.transpose(t).conj(), a)
        a = np.asarray(a, dtype=dtype)

        c = np.linalg.cholesky(a)
        b = np.matmul(c, c.transpose(t).conj())
        assert_allclose(b, a,
                        err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                        atol=500 * a.shape[0] * np.finfo(dtype).eps)
Example 17
Project: recruit Author: Frank-qlu File: test_utils.py License: Apache License 2.0 | 6 votes |
def test_float64_pass(self):
    # The number of units of least precision
    # In this case, use a few places above the lowest level (ie nulp=1)
    nulp = 5
    x = np.linspace(-20, 20, 50, dtype=np.float64)
    x = 10**x
    x = np.r_[-x, x]

    # Addition
    eps = np.finfo(x.dtype).eps
    y = x + x*eps*nulp/2.
    assert_array_almost_equal_nulp(x, y, nulp)

    # Subtraction
    epsneg = np.finfo(x.dtype).epsneg
    y = x - x*epsneg*nulp/2.
    assert_array_almost_equal_nulp(x, y, nulp)
Example 18
Project: recruit Author: Frank-qlu File: test_utils.py License: Apache License 2.0 | 6 votes |
def test_complex128_pass(self):
    nulp = 5
    x = np.linspace(-20, 20, 50, dtype=np.float64)
    x = 10**x
    x = np.r_[-x, x]
    xi = x + x*1j

    eps = np.finfo(x.dtype).eps
    y = x + x*eps*nulp/2.
    assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
    assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
    # The test condition needs to be at least a factor of sqrt(2) smaller
    # because the real and imaginary parts both change
    y = x + x*eps*nulp/4.
    assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

    epsneg = np.finfo(x.dtype).epsneg
    y = x - x*epsneg*nulp/2.
    assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
    assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
    y = x - x*epsneg*nulp/4.
    assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
Example 19
Project: recruit Author: Frank-qlu File: test_utils.py License: Apache License 2.0 | 6 votes |
def test_complex64_pass(self):
    nulp = 5
    x = np.linspace(-20, 20, 50, dtype=np.float32)
    x = 10**x
    x = np.r_[-x, x]
    xi = x + x*1j

    eps = np.finfo(x.dtype).eps
    y = x + x*eps*nulp/2.
    assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
    assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
    y = x + x*eps*nulp/4.
    assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

    epsneg = np.finfo(x.dtype).epsneg
    y = x - x*epsneg*nulp/2.
    assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
    assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
    y = x - x*epsneg*nulp/4.
    assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
Example 20
Project: recruit Author: Frank-qlu File: test_numeric.py License: Apache License 2.0 | 6 votes |
def test_can_cast_values(self):
    # gh-5917
    for dt in np.sctypes['int'] + np.sctypes['uint']:
        ii = np.iinfo(dt)
        assert_(np.can_cast(ii.min, dt))
        assert_(np.can_cast(ii.max, dt))
        assert_(not np.can_cast(ii.min - 1, dt))
        assert_(not np.can_cast(ii.max + 1, dt))

    for dt in np.sctypes['float']:
        fi = np.finfo(dt)
        assert_(np.can_cast(fi.min, dt))
        assert_(np.can_cast(fi.max, dt))


# Custom exception class to test exception propagation in fromiter
Example 21
Project: recruit Author: Frank-qlu File: test_umath.py License: Apache License 2.0 | 6 votes |
def test_against_cmath(self):
    import cmath

    points = [-1-1j, -1+1j, +1-1j, +1+1j]
    name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
                'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
    atol = 4*np.finfo(complex).eps
    for func in self.funcs:
        fname = func.__name__.split('.')[-1]
        cname = name_map.get(fname, fname)
        try:
            cfunc = getattr(cmath, cname)
        except AttributeError:
            continue
        for p in points:
            a = complex(func(np.complex_(p)))
            b = cfunc(p)
            assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
Example 22
Project: soccer-matlab Author: utra-robosoccer File: racecarGymEnv.py License: BSD 2-Clause "Simplified" License | 5 votes |
def __init__(self,
             urdfRoot=pybullet_data.getDataPath(),
             actionRepeat=50,
             isEnableSelfCollision=True,
             isDiscrete=False,
             renders=False):
    print("init")
    self._timeStep = 0.01
    self._urdfRoot = urdfRoot
    self._actionRepeat = actionRepeat
    self._isEnableSelfCollision = isEnableSelfCollision
    self._observation = []
    self._ballUniqueId = -1
    self._envStepCounter = 0
    self._renders = renders
    self._isDiscrete = isDiscrete
    if self._renders:
        self._p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
    else:
        self._p = bullet_client.BulletClient()

    self._seed()
    #self.reset()
    observationDim = 2  #len(self.getExtendedObservation())
    #print("observationDim")
    #print(observationDim)
    # observation_high = np.array([np.finfo(np.float32).max] * observationDim)
    observation_high = np.ones(observationDim) * 1000  #np.inf
    if (isDiscrete):
        self.action_space = spaces.Discrete(9)
    else:
        action_dim = 2
        self._action_bound = 1
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
    self.observation_space = spaces.Box(-observation_high, observation_high)
    self.viewer = None
Example 23
Project: soccer-matlab Author: utra-robosoccer File: racecarZEDGymEnv.py License: BSD 2-Clause "Simplified" License | 5 votes |
def __init__(self,
             urdfRoot=pybullet_data.getDataPath(),
             actionRepeat=10,
             isEnableSelfCollision=True,
             isDiscrete=False,
             renders=True):
    print("init")
    self._timeStep = 0.01
    self._urdfRoot = urdfRoot
    self._actionRepeat = actionRepeat
    self._isEnableSelfCollision = isEnableSelfCollision
    self._ballUniqueId = -1
    self._envStepCounter = 0
    self._renders = renders
    self._width = 100
    self._height = 10
    self._isDiscrete = isDiscrete
    if self._renders:
        self._p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
    else:
        self._p = bullet_client.BulletClient()

    self._seed()
    self.reset()
    observationDim = len(self.getExtendedObservation())
    #print("observationDim")
    #print(observationDim)
    observation_high = np.array([np.finfo(np.float32).max] * observationDim)
    if (isDiscrete):
        self.action_space = spaces.Discrete(9)
    else:
        action_dim = 2
        self._action_bound = 1
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
    self.observation_space = spaces.Box(low=0, high=255, shape=(self._height, self._width, 4))
    self.viewer = None
Example 24
Project: PHATE Author: KrishnaswamyLab File: vne.py License: GNU General Public License v2.0 | 5 votes |
def compute_von_neumann_entropy(data, t_max=100):
    """
    Determines the Von Neumann entropy of data
    at varying matrix powers. The user should select a value of t
    around the "knee" of the entropy curve.

    Parameters
    ----------
    t_max : int, default: 100
        Maximum value of t to test

    Returns
    -------
    entropy : array, shape=[t_max]
        The entropy of the diffusion affinities for each value of t

    Examples
    --------
    >>> import numpy as np
    >>> import phate
    >>> X = np.eye(10)
    >>> X[0,0] = 5
    >>> X[3,2] = 4
    >>> h = phate.vne.compute_von_neumann_entropy(X)
    >>> phate.vne.find_knee_point(h)
    23
    """
    _, eigenvalues, _ = svd(data)
    entropy = []
    eigenvalues_t = np.copy(eigenvalues)
    for _ in range(t_max):
        prob = eigenvalues_t / np.sum(eigenvalues_t)
        prob = prob + np.finfo(float).eps
        entropy.append(-np.sum(prob * np.log(prob)))
        eigenvalues_t = eigenvalues_t * eigenvalues
    entropy = np.array(entropy)

    return np.array(entropy)
Example 25
Project: tf2-yolo3 Author: akkaze File: kmeans.py License: Apache License 2.0 | 5 votes |
def kmeans(dataset, k):
    num_samples = dataset.shape[0]
    # first column stores which cluster this sample belongs to,
    # second column stores the error between this sample and its centroid
    cluster_assignment = np.zeros((num_samples, 2))
    cluster_updated = True

    ## step 1: init centroids
    centroids = init_centroids(dataset, k)

    while cluster_updated:
        cluster_updated = False
        # for each sample
        for i in range(num_samples):  #range
            min_dist = np.finfo(np.float32).max
            min_index = 0
            # for each centroid
            # step 2: find the centroid who is closest
            for j in range(k):
                distance = eucl_dist(centroids[j, :], dataset[i, :])
                if distance < min_dist:
                    min_dist, min_index = distance, j

            # step 3: update its cluster
            if cluster_assignment[i, 0] != min_index:
                cluster_updated = True
                cluster_assignment[i, :] = min_index, min_dist**2

        # step 4: update centroids
        for j in range(k):
            pts_in_cluster = dataset[np.nonzero(cluster_assignment[:, 0] == j)[0]]
            centroids[j, :] = np.mean(pts_in_cluster, axis=0)

    return centroids, cluster_assignment
Example 26
Project: pyscf Author: pyscf File: addons.py License: Apache License 2.0 | 5 votes |
def remove_linear_dep_(mf, threshold=LINEAR_DEP_THRESHOLD,
                       lindep=LINEAR_DEP_TRIGGER,
                       cholesky_threshold=CHOLESKY_THRESHOLD):
    '''
    Args:
        threshold : float
            The threshold under which the eigenvalues of the overlap matrix are
            discarded to avoid numerical instability.
        lindep : float
            The threshold that triggers the special treatment of the linear
            dependence issue.
    '''
    s = mf.get_ovlp()
    cond = numpy.max(lib.cond(s))
    if cond < 1./lindep:
        return mf

    logger.info(mf, 'Applying remove_linear_dep_ on SCF object.')
    logger.debug(mf, 'Overlap condition number %g', cond)
    if(cond < 1./numpy.finfo(s.dtype).eps):
        logger.info(mf, 'Using canonical orthogonalization')

        def eigh(h, s):
            x = canonical_orth_(s, threshold)
            xhx = reduce(numpy.dot, (x.T.conj(), h, x))
            e, c = numpy.linalg.eigh(xhx)
            c = numpy.dot(x, c)
            return e, c
        mf._eigh = eigh
    else:
        logger.info(mf, 'Using partial Cholesky orthogonalization '
                    '(doi:10.1063/1.5139948, doi:10.1103/PhysRevA.101.032504)')

        def eigh(h, s):
            x = partial_cholesky_orth_(s, threshold, cholesky_threshold)
            xhx = reduce(numpy.dot, (x.T.conj(), h, x))
            e, c = numpy.linalg.eigh(xhx)
            c = numpy.dot(x, c)
            return e, c
        mf._eigh = eigh
    return mf
Example 27
Project: NeuroKit Author: neuropsychology File: mutual_information.py License: MIT License | 5 votes |
def _entropy(X, k=1):
    """Returns the entropy of X.

    From https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data the entropy of which is computed
    k : int (optional)
        number of nearest neighbors for density estimation

    Returns
    -------
    float
        entropy of X.

    Notes
    -----
    - Kozachenko, L. F. & Leonenko, N. N. 1987 Sample estimate of entropy of a random
      vector. Probl. Inf. Transm. 23, 95-101.
    - Evans, D. 2008 A computationally efficient estimator for mutual information,
      Proc. R. Soc. A 464 (2093), 1203-1215.
    - Kraskov A, Stogbauer H, Grassberger P. (2004). Estimating mutual information.
      Phys Rev E 69(6 Pt 2):066138.
    """
    # Distance to kth nearest neighbor
    r = _nearest_distances(X, k)  # squared distances
    n, d = X.shape
    volume_unit_ball = (np.pi ** (0.5 * d)) / scipy.special.gamma(0.5 * d + 1)

    # Perez-Cruz et al. (2008). Estimation of Information Theoretic Measures for
    # Continuous Random Variables, suggests returning:
    # return d*mean(log(r))+log(volume_unit_ball)+log(n-1)-log(k)
    return (
        d * np.mean(np.log(r + np.finfo(X.dtype).eps))
        + np.log(volume_unit_ball)
        + scipy.special.psi(n)
        - scipy.special.psi(k)
    )
Example 28
Project: me-ica Author: ME-ICA File: dwiparams.py License: GNU Lesser General Public License v2.1 | 5 votes |
def B2q(B, tol=None):
    ''' Estimate q vector from input B matrix `B`

    We assume the input `B` is symmetric positive definite.

    Because the solution is a square root, the sign of the returned vector is
    arbitrary. We set the vector to have a positive x component by convention.

    Parameters
    ----------
    B : (3,3) array-like
        B matrix - symmetric. We do not check the symmetry.
    tol : None or float
        absolute tolerance below which to consider eigenvalues of the B
        matrix to be small enough not to worry about them being negative, in
        check for positive semi-definite-ness. None (default) results in a
        fairly tight numerical threshold proportional the maximum eigenvalue

    Returns
    -------
    q : (3,) vector
        Estimated q vector from B matrix `B`
    '''
    B = np.asarray(B)
    w, v = npl.eig(B)
    if tol is None:
        tol = np.abs(w.max() * np.finfo(w.dtype).eps)
    non_trivial = np.abs(w) > tol
    if np.any(w[non_trivial] < 0):
        raise ValueError('B not positive semi-definite')
    inds = np.argsort(w)[::-1]
    max_ind = inds[0]
    vector = v[:, max_ind]
    # because the factor is a sqrt, the sign of the vector is arbitrary.
    # We arbitrarily set it to have a positive x value.
    if vector[0] < 0:
        vector *= -1
    return vector * w[max_ind]
Example 29
Project: me-ica Author: ME-ICA File: test_casting.py License: GNU Lesser General Public License v2.1 | 5 votes |
def test_ulp():
    assert_equal(ulp(), np.finfo(np.float64).eps)
    assert_equal(ulp(1.0), np.finfo(np.float64).eps)
    assert_equal(ulp(np.float32(1.0)), np.finfo(np.float32).eps)
    assert_equal(ulp(np.float32(1.999)), np.finfo(np.float32).eps)
    # Integers always return 1
    assert_equal(ulp(1), 1)
    assert_equal(ulp(2**63-1), 1)
    # negative / positive same
    assert_equal(ulp(-1), 1)
    assert_equal(ulp(7.999), ulp(4.0))
    assert_equal(ulp(-7.999), ulp(4.0))
    assert_equal(ulp(np.float64(2**54-2)), 2)
    assert_equal(ulp(np.float64(2**54)), 4)
    assert_equal(ulp(np.float64(2**54)), 4)
    # Infs, NaNs return nan
    assert_true(np.isnan(ulp(np.inf)))
    assert_true(np.isnan(ulp(-np.inf)))
    assert_true(np.isnan(ulp(np.nan)))
    # 0 gives subnormal smallest
    subn64 = np.float64(2**(-1022-52))
    subn32 = np.float32(2**(-126-23))
    assert_equal(ulp(0.0), subn64)
    assert_equal(ulp(np.float64(0)), subn64)
    assert_equal(ulp(np.float32(0)), subn32)
    # as do multiples of subnormal smallest
    assert_equal(ulp(subn64 * np.float64(2**52)), subn64)
    assert_equal(ulp(subn64 * np.float64(2**53)), subn64*2)
    assert_equal(ulp(subn32 * np.float32(2**23)), subn32)
    assert_equal(ulp(subn32 * np.float32(2**24)), subn32*2)
Example 30
Project: me-ica Author: ME-ICA File: test_round_trip.py License: GNU Lesser General Public License v2.1 | 5 votes |
def check_params(in_arr, in_type, out_type):
    arr = in_arr.astype(in_type)
    # clip infs that can arise from downcasting
    if arr.dtype.kind == 'f':
        info = np.finfo(in_type)
        arr = np.clip(arr, info.min, info.max)
    try:
        arr_dash, slope, inter = round_trip(arr, out_type)
    except (ScalingError, HeaderDataError):
        return arr, None, None, None
    return arr, arr_dash, slope, inter