Python sklearn.neighbors.KDTree.valid_metrics() Examples
The following are 13 code examples of sklearn.neighbors.KDTree.valid_metrics().
The original project and source file are noted above each example.
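Before the examples, a minimal sketch of what KDTree.valid_metrics exposes. This is illustrative: in the scikit-learn versions these examples target, valid_metrics is a plain class attribute (a list of metric-name strings), and its exact contents vary by version.

import numpy as np
from sklearn.neighbors import KDTree

# valid_metrics is a class attribute listing the metric names the KDTree
# supports (the exact list varies by scikit-learn version).
print(KDTree.valid_metrics)

# The common pattern in the examples below: validate a user-supplied metric
# before building the tree.
metric = 'chebyshev'
if metric not in KDTree.valid_metrics:
    raise ValueError('Unsupported KDTree metric: %s' % metric)
tree = KDTree(np.random.RandomState(0).rand(10, 2), metric=metric)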
Example #1
Source File: entropy_akshat.py From Emotion-Recogniton-from-EEG-Signals with MIT License
def app_entropy(x, order=2, metric='chebyshev'):
    """Approximate Entropy.

    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times).
    order : int (default: 2)
        Embedding dimension.
    metric : str (default: chebyshev)
        Name of the metric function used with
        :class:`~sklearn.neighbors.KDTree`. The list of available
        metric functions is given by: ``KDTree.valid_metrics``.

    Returns
    -------
    ae : float
        Approximate Entropy.
    """
    phi = _app_samp_entropy(x, order=order, metric=metric, approximate=True)
    return np.subtract(phi[0], phi[1])
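A hypothetical call, assuming numpy is imported as np and the _app_samp_entropy helper from Example #5 is in scope; the signal here is illustrative.

rng = np.random.RandomState(42)
signal = rng.randn(1000)            # illustrative one-dimensional series
ae = app_entropy(signal, order=2)   # higher AppEn => less regularity
print(ae)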
Example #2
Source File: entropy_akshat.py From Emotion-Recogniton-from-EEG-Signals with MIT License
def sample_entropy(x, order=2, metric='chebyshev'):
    """Sample Entropy.

    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times).
    order : int (default: 2)
        Embedding dimension.
    metric : str (default: chebyshev)
        Name of the metric function used with KDTree. The list of
        available metric functions is given by: ``KDTree.valid_metrics``.

    Returns
    -------
    se : float
        Sample Entropy.
    """
    x = np.asarray(x, dtype=np.float64)
    if metric == 'chebyshev' and x.size < 5000:
        return _numba_sampen(x, mm=order, r=0.2)
    else:
        phi = _app_samp_entropy(x, order=order, metric=metric,
                                approximate=False)
        # The final return was truncated in the source; reconstructed here
        # from the per-channel SampEn formula used in Example #10.
        return -np.log(np.divide(phi[1], phi[0]))
Example #3
Source File: test_kde.py From twitter-stock-recommendation with MIT License
def test_kde_algorithm_metric_choice():
    # Smoke test for various metrics and algorithms
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)    # 2 features required for haversine dist.
    Y = rng.randn(10, 2)

    for algorithm in ['auto', 'ball_tree', 'kd_tree']:
        for metric in ['euclidean', 'minkowski', 'manhattan',
                       'chebyshev', 'haversine']:
            if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
                assert_raises(ValueError, KernelDensity,
                              algorithm=algorithm, metric=metric)
            else:
                kde = KernelDensity(algorithm=algorithm, metric=metric)
                kde.fit(X)
                y_dens = kde.score_samples(Y)
                assert_equal(y_dens.shape, Y.shape[:1])
Example #4
Source File: tests_complexity.py From NeuroKit with MIT License
def entropy_app_samp_entropy(x, order, metric="chebyshev", approximate=True):
    _all_metrics = KDTree.valid_metrics
    if metric not in _all_metrics:
        raise ValueError(
            "The given metric (%s) is not valid. The valid "
            "metric names are: %s" % (metric, _all_metrics)
        )
    phi = np.zeros(2)
    r = 0.2 * np.std(x, axis=-1, ddof=1)

    # compute phi(order, r)
    _emb_data1 = entropy_embed(x, order, 1)
    if approximate:
        emb_data1 = _emb_data1
    else:
        emb_data1 = _emb_data1[:-1]
    count1 = KDTree(emb_data1, metric=metric).query_radius(
        emb_data1, r, count_only=True).astype(np.float64)

    # compute phi(order + 1, r)
    emb_data2 = entropy_embed(x, order + 1, 1)
    count2 = KDTree(emb_data2, metric=metric).query_radius(
        emb_data2, r, count_only=True).astype(np.float64)

    if approximate:
        phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
        phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
    else:
        phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
        phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
    return phi
Example #5
Source File: entropy_akshat.py From Emotion-Recogniton-from-EEG-Signals with MIT License
def _app_samp_entropy(x, order, metric='chebyshev', approximate=True):
    """Utility function for `app_entropy` and `sample_entropy`."""
    _all_metrics = KDTree.valid_metrics
    if metric not in _all_metrics:
        raise ValueError('The given metric (%s) is not valid. The valid '
                         'metric names are: %s' % (metric, _all_metrics))
    phi = np.zeros(2)
    r = 0.2 * np.std(x, axis=-1, ddof=1)

    # compute phi(order, r)
    _emb_data1 = _embed(x, order, 1)
    if approximate:
        emb_data1 = _emb_data1
    else:
        emb_data1 = _emb_data1[:-1]
    count1 = KDTree(emb_data1, metric=metric).query_radius(
        emb_data1, r, count_only=True).astype(np.float64)

    # compute phi(order + 1, r)
    emb_data2 = _embed(x, order + 1, 1)
    count2 = KDTree(emb_data2, metric=metric).query_radius(
        emb_data2, r, count_only=True).astype(np.float64)

    if approximate:
        phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
        phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
    else:
        phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
        phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
    return phi

# The main code
Example #6
Source File: base_neighbors.py From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def __init__(self, n_neighbors=5, max_window_size=1000, leaf_size=30,
             metric='euclidean'):
    self.n_neighbors = n_neighbors
    self.max_window_size = max_window_size
    self.leaf_size = leaf_size
    if metric not in self.valid_metrics():
        raise ValueError("Invalid metric: {}.\n"
                         "Valid options are: {}".format(metric,
                                                        self.valid_metrics()))
    self.metric = metric
    self.data_window = SlidingWindow(window_size=max_window_size)
Example #7
Source File: base_neighbors.py From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
@staticmethod  # implied by the self.valid_metrics() call in Example #6; not shown in the scraped snippet
def valid_metrics():
    """ Get valid distance metrics for the KDTree. """
    return KDTree.valid_metrics
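Examples #6 and #7 work as a pair: the static helper exposes the KDTree metric list so the constructor can validate its metric argument up front. A self-contained sketch of that pattern (the class name and reduced constructor are illustrative, not the actual scikit-multiflow API):

from sklearn.neighbors import KDTree

class WindowedKNN:
    # Illustrative stand-in for the scikit-multiflow base class.
    def __init__(self, metric='euclidean'):
        if metric not in self.valid_metrics():
            raise ValueError("Invalid metric: {}.\n"
                             "Valid options are: {}".format(metric,
                                                            self.valid_metrics()))
        self.metric = metric

    @staticmethod
    def valid_metrics():
        """Get valid distance metrics for the KDTree."""
        return KDTree.valid_metrics

WindowedKNN(metric='euclidean')    # accepted
# WindowedKNN(metric='haversine')  # raises ValueError: haversine is not a
#                                  # KDTree metric (see Examples #3 and #8)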
Example #8
Source File: test_kde.py From Mastering-Elasticsearch-7.0 with MIT License
def test_kde_algorithm_metric_choice(algorithm, metric):
    # Smoke test for various metrics and algorithms
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)    # 2 features required for haversine dist.
    Y = rng.randn(10, 2)

    if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
        assert_raises(ValueError, KernelDensity,
                      algorithm=algorithm, metric=metric)
    else:
        kde = KernelDensity(algorithm=algorithm, metric=metric)
        kde.fit(X)
        y_dens = kde.score_samples(Y)
        assert_equal(y_dens.shape, Y.shape[:1])
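Unlike Example #3, this version of the test receives algorithm and metric as function arguments, which implies it is driven by pytest parametrization (the decorators are not shown in the snippet). A plausible reconstruction, offered as an assumption, using the loop values from Example #3:

import pytest

# Assumed decorators -- not present in the scraped snippet.
@pytest.mark.parametrize('algorithm', ['auto', 'ball_tree', 'kd_tree'])
@pytest.mark.parametrize('metric', ['euclidean', 'minkowski', 'manhattan',
                                    'chebyshev', 'haversine'])
def test_kde_algorithm_metric_choice(algorithm, metric):
    ...  # body as above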
Example #9
Source File: univariate.py From mne-features with BSD 3-Clause "New" or "Revised" License
def compute_app_entropy(data, emb=2, metric='chebyshev'):
    """Approximate Entropy (AppEn, per channel).

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)

    emb : int (default: 2)
        Embedding dimension.

    metric : str (default: chebyshev)
        Name of the metric function used with
        :class:`~sklearn.neighbors.KDTree`. The list of available
        metric functions is given by: ``KDTree.valid_metrics``.

    Returns
    -------
    output : ndarray, shape (n_channels,)

    Notes
    -----
    Alias of the feature function: **app_entropy**. See [1]_.

    References
    ----------
    .. [1] Richman, J. S. et al. (2000). Physiological time-series analysis
           using approximate entropy and sample entropy. American Journal of
           Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.
    """
    phi = _app_samp_entropy_helper(data, emb=emb, metric=metric,
                                   approximate=True)
    return np.subtract(phi[:, 0], phi[:, 1])
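A hypothetical call on multichannel data. It assumes mne-features is installed and that compute_app_entropy is importable from mne_features.univariate, where this source file lives:

import numpy as np
from mne_features.univariate import compute_app_entropy

rng = np.random.RandomState(0)
data = rng.randn(4, 1000)          # 4 channels, 1000 time points
print(compute_app_entropy(data))   # one AppEn value per channel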
Example #10
Source File: univariate.py From mne-features with BSD 3-Clause "New" or "Revised" License
def compute_samp_entropy(data, emb=2, metric='chebyshev'):
    """Sample Entropy (SampEn, per channel).

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)

    emb : int (default: 2)
        Embedding dimension.

    metric : str (default: chebyshev)
        Name of the metric function used with KDTree. The list of
        available metric functions is given by: ``KDTree.valid_metrics``.

    Returns
    -------
    output : ndarray, shape (n_channels,)

    Notes
    -----
    Alias of the feature function: **samp_entropy**. See [1]_.

    References
    ----------
    .. [1] Richman, J. S. et al. (2000). Physiological time-series analysis
           using approximate entropy and sample entropy. American Journal of
           Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.
    """
    phi = _app_samp_entropy_helper(data, emb=emb, metric=metric,
                                   approximate=False)
    if np.allclose(phi[:, 0], 0) or np.allclose(phi[:, 1], 0):
        raise ValueError('Sample Entropy is not defined.')
    else:
        return -np.log(np.divide(phi[:, 1], phi[:, 0]))
Example #11
Source File: entropy.py From entropy with BSD 3-Clause "New" or "Revised" License
def _app_samp_entropy(x, order, metric='chebyshev', approximate=True):
    """Utility function for `app_entropy` and `sample_entropy`."""
    _all_metrics = KDTree.valid_metrics
    if metric not in _all_metrics:
        raise ValueError('The given metric (%s) is not valid. The valid '
                         'metric names are: %s' % (metric, _all_metrics))
    phi = np.zeros(2)
    r = 0.2 * np.std(x, ddof=0)

    # compute phi(order, r)
    _emb_data1 = _embed(x, order, 1)
    if approximate:
        emb_data1 = _emb_data1
    else:
        emb_data1 = _emb_data1[:-1]
    count1 = KDTree(emb_data1, metric=metric).query_radius(
        emb_data1, r, count_only=True).astype(np.float64)

    # compute phi(order + 1, r)
    emb_data2 = _embed(x, order + 1, 1)
    count2 = KDTree(emb_data2, metric=metric).query_radius(
        emb_data2, r, count_only=True).astype(np.float64)

    if approximate:
        phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))
        phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))
    else:
        phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
        phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
    return phi
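One subtle difference from the near-identical Example #5: here the tolerance radius is computed with np.std(x, ddof=0) (the population standard deviation), whereas Example #5 uses ddof=1 (the sample standard deviation). For signals of realistic length the two radii are nearly indistinguishable, but exact outputs can differ slightly between the two implementations.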
Example #12
Source File: test_kde.py From Mastering-Elasticsearch-7.0 with MIT License
def test_kde_sample_weights():
    n_samples = 400
    size_test = 20
    weights_neutral = np.full(n_samples, 3.)
    for d in [1, 2, 10]:
        rng = np.random.RandomState(0)
        X = rng.rand(n_samples, d)
        weights = 1 + (10 * X.sum(axis=1)).astype(np.int8)
        X_repetitions = np.repeat(X, weights, axis=0)
        n_samples_test = size_test // d
        test_points = rng.rand(n_samples_test, d)
        for algorithm in ['auto', 'ball_tree', 'kd_tree']:
            for metric in ['euclidean', 'minkowski', 'manhattan',
                           'chebyshev']:
                if algorithm != 'kd_tree' or metric in KDTree.valid_metrics:
                    kde = KernelDensity(algorithm=algorithm, metric=metric)

                    # Test that adding a constant sample weight has no effect
                    kde.fit(X, sample_weight=weights_neutral)
                    scores_const_weight = kde.score_samples(test_points)
                    sample_const_weight = kde.sample(random_state=1234)
                    kde.fit(X)
                    scores_no_weight = kde.score_samples(test_points)
                    sample_no_weight = kde.sample(random_state=1234)
                    assert_allclose(scores_const_weight, scores_no_weight)
                    assert_allclose(sample_const_weight, sample_no_weight)

                    # Test equivalence between sampling and (integer) weights
                    kde.fit(X, sample_weight=weights)
                    scores_weight = kde.score_samples(test_points)
                    sample_weight = kde.sample(random_state=1234)
                    kde.fit(X_repetitions)
                    scores_ref_sampling = kde.score_samples(test_points)
                    sample_ref_sampling = kde.sample(random_state=1234)
                    assert_allclose(scores_weight, scores_ref_sampling)
                    assert_allclose(sample_weight, sample_ref_sampling)

                    # Test that sample weights has a non-trivial effect
                    diff = np.max(np.abs(scores_no_weight - scores_weight))
                    assert diff > 0.001

                    # Test invariance with respect to arbitrary scaling
                    scale_factor = rng.rand()
                    kde.fit(X, sample_weight=(scale_factor * weights))
                    scores_scaled_weight = kde.score_samples(test_points)
                    assert_allclose(scores_scaled_weight, scores_weight)
Example #13
Source File: univariate.py From mne-features with BSD 3-Clause "New" or "Revised" License
def _app_samp_entropy_helper(data, emb, metric='chebyshev', approximate=True):
    """Utility function for `compute_app_entropy` and `compute_samp_entropy`.

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)

    emb : int (default: 2)
        Embedding dimension.

    metric : str (default: chebyshev)
        Name of the metric function used with KDTree. The list of
        available metric functions is given by: ``KDTree.valid_metrics``.

    approximate : bool (default: True)
        If True, the returned values will be used to compute the
        Approximate Entropy (AppEn). Otherwise, the values are used to
        compute the Sample Entropy (SampEn).

    Returns
    -------
    output : ndarray, shape (n_channels, 2)
    """
    _all_metrics = KDTree.valid_metrics
    if metric not in _all_metrics:
        raise ValueError('The given metric (%s) is not valid. The valid '
                         'metric names are: %s' % (metric, _all_metrics))
    n_channels, n_times = data.shape
    phi = np.empty((n_channels, 2))
    for j in range(n_channels):
        r = 0.2 * np.std(data[j, :], axis=-1, ddof=1)

        # compute phi(emb, r)
        _emb_data1 = _embed(data[j, None], emb, 1)[0, :, :]
        if approximate:
            emb_data1 = _emb_data1
        else:
            emb_data1 = _emb_data1[:-1, :]
        count1 = KDTree(emb_data1, metric=metric).query_radius(
            emb_data1, r, count_only=True).astype(np.float64)

        # compute phi(emb + 1, r)
        emb_data2 = _embed(data[j, None], emb + 1, 1)[0, :, :]
        count2 = KDTree(emb_data2, metric=metric).query_radius(
            emb_data2, r, count_only=True).astype(np.float64)

        if approximate:
            phi[j, 0] = np.mean(np.log(count1 / emb_data1.shape[0]))
            phi[j, 1] = np.mean(np.log(count2 / emb_data2.shape[0]))
        else:
            phi[j, 0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))
            phi[j, 1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))
    return phi