# Python scipy.stats.boxcox() Examples

The following code examples show how to use scipy.stats.boxcox(). They are taken from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
```def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5

# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])

# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292]) ```
Example 2
 Project: att   Author: Centre-Alt-Rendiment-Esportiu   File: test_morestats.py    GNU General Public License v3.0 6 votes
```def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5

# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])

# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292]) ```
Example 3
 Project: skutil   Author: tgsmith61591   File: transform.py    BSD 3-Clause "New" or "Revised" License 6 votes
```def _estimate_lambda_single_y(y):
"""Estimate lambda for a single y, given a range of lambdas
through which to search. No validation performed.

Parameters
----------

y : ndarray, shape (n_samples,)
The vector being estimated against
"""

# ensure is array
y = np.array(y)

# Use scipy's log-likelihood estimator
b = boxcox(y, lmbda=None)

# Return lambda corresponding to maximum P
return b[1] ```
Example 4
```def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5

# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])

# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292]) ```
Example 5
```def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5

# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])

# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292]) ```
Example 6
def fit(self, X, y=None):
    """Fit translate and lambda attributes to X data.

    Args:
        X (:obj:`numpy.ndarray`): Fit data
        y: input labels (unused; present for estimator API compatibility)

    Returns:
        self

    """
    data = check_array(X)
    lowest = np.nanmin(data)
    # Record a shift that maps the minimum above zero; together with the
    # +1 below it keeps boxcox's input strictly positive.
    self.translate_ = 0 if lowest > 0 else -lowest
    _, self.lambda_ = boxcox(data + 1 + self.translate_)
    return self
Example 7
```def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5

# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])

# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292]) ```
Example 8
```def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5

# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])

# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292]) ```
Example 9
```def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5

# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])

# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292]) ```
Example 10
def _fit_boxcox(self, X):
    """ Transform features using a boxcox transform.

    Parameters
    ----------
    X : np.array [n_samples, n_features]
        Untransformed training features.

    Returns
    -------
    X_boxcox : np.array [n_samples, n_features]
        Transformed training features.
    """
    # Remember the feature count so _transform can reuse it later.
    _, self.n_feats = X.shape

    transformed = np.zeros(X.shape)
    lambdas = np.zeros((self.n_feats,))

    # Fit each feature column independently; EPS shifts values away from
    # zero since boxcox requires strictly positive input.
    for col in range(self.n_feats):
        transformed[:, col], lambdas[col] = boxcox(X[:, col] + EPS)

    self.lmbda = lambdas
    return transformed
Example 11
def _transform(self, X):
    """ Transform an input feature matrix using the trained boxcox
    parameters.

    Parameters
    ----------
    X : np.array [n_samples, n_features]
        Input features.

    Returns
    -------
    X_boxcox : np.array [n_samples, n_features]
        Transformed features.

    """
    transformed = np.zeros(X.shape)
    # Apply the per-column lambdas learned at fit time; EPS matches the
    # shift used during fitting.
    for col in range(self.n_feats):
        transformed[:, col] = boxcox(X[:, col] + EPS, lmbda=self.lmbda[col])
    return transformed
Example 12
```def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that.  boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12) ```
Example 13
```def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)

xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))

# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x)) ```
Example 14
```def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)

assert_almost_equal(maxlog, -1 / lmbda, decimal=2) ```
Example 15
```def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x) ```
Example 16
```def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)

# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog) ```
Example 17
 Project: Archrisk   Author: UCSBarchlab   File: boxcox.py    BSD 3-Clause "New" or "Revised" License 5 votes
def transform(samples, shape):
    """Box-Cox transform *samples* using the fixed exponent *shape*."""
    data = np.asarray(samples)
    return boxcox(data, shape)
Example 18
 Project: att   Author: Centre-Alt-Rendiment-Esportiu   File: test_morestats.py    GNU General Public License v3.0 5 votes
```def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that.  boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12) ```
Example 19
 Project: att   Author: Centre-Alt-Rendiment-Esportiu   File: test_morestats.py    GNU General Public License v3.0 5 votes
```def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)

xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))

# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x)) ```
Example 20
 Project: att   Author: Centre-Alt-Rendiment-Esportiu   File: test_morestats.py    GNU General Public License v3.0 5 votes
```def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)

assert_almost_equal(maxlog, -1 / lmbda, decimal=2) ```
Example 21
 Project: att   Author: Centre-Alt-Rendiment-Esportiu   File: test_morestats.py    GNU General Public License v3.0 5 votes
```def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x) ```
Example 22
 Project: att   Author: Centre-Alt-Rendiment-Esportiu   File: test_morestats.py    GNU General Public License v3.0 5 votes
```def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)

# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog) ```
Example 23
def fit(self, y, exogenous=None):
    """Fit the transformer

    Learns the value of ``lmbda``, if not specified in the constructor.
    If defined in the constructor, is not re-learned.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The endogenous (time-series) array.

    exogenous : array-like or None, shape=(n_samples, n_features), optional
        The exogenous array of additional covariates. Not used for
        endogenous transformers. Default is None, and non-None values will
        serve as pass-through arrays.
    """
    lmbda = self.lmbda
    shift = self.lmbda2

    # The shift must keep the data non-negative before the transform.
    if shift < 0:
        raise ValueError("lmbda2 must be a non-negative scalar value")

    # Only estimate lambda when it was not fixed in the constructor.
    if lmbda is None:
        y, _ = self._check_y_exog(y, exogenous)
        _, lmbda = stats.boxcox(y + shift, lmbda=None, alpha=None)

    self.lam1_ = lmbda
    self.lam2_ = shift
    return self
Example 24
```def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that.  boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12) ```
Example 25
```def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)

xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))

# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x)) ```
Example 26
```def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)

assert_almost_equal(maxlog, -1 / lmbda, decimal=2) ```
Example 27
```def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x) ```
Example 28
```def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)

# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog) ```
Example 29
def test_boxcox_bad_arg():
    """Raise ValueError if any data value is negative."""
    negative = np.array([-1])
    assert_raises(ValueError, stats.boxcox, negative)
Example 30
```def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that.  boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12) ```
Example 31
```def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)

xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))

# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x)) ```
Example 32
```def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)

assert_almost_equal(maxlog, -1 / lmbda, decimal=2) ```
Example 33
```def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x) ```
Example 34
```def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)

# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog) ```
Example 35
def fit_transform(self, X: dt.Frame, y: np.array = None):
    """Learn the boxcox offset and lambda from the first column, then transform it."""
    values = X.to_pandas().iloc[:, 0].values
    missing = np.isnan(values)
    # Offset shifts the data strictly above zero (boxcox needs positive input).
    self._offset = -np.nanmin(values) if np.nanmin(values) < 0 else 0
    self._offset += 1e-3
    self._lmbda = None
    if not any(~missing):
        # All values missing: nothing to fit, pass the frame through.
        return X
    # lmbda=None asks scipy to estimate lambda; keep only the estimate.
    self._lmbda = boxcox(self._offset + values[~missing], lmbda=self._lmbda)[1]  # compute lambda
    return self.transform(X)
Example 36
def transform(self, X: dt.Frame):
    """Apply the fitted boxcox transform to the first column of X."""
    values = X.to_pandas().iloc[:, 0].values
    # Mask NaNs plus anything the learned offset cannot make positive.
    missing = np.isnan(values) | np.array(values <= -self._offset)
    if not any(~missing) or self._lmbda is None:
        # Nothing transformable (or never fitted): pass the frame through.
        return X
    transformed = boxcox(self._offset + values[~missing], lmbda=self._lmbda)  # apply transform with pre-computed lambda
    values[~missing] = transformed
    return values
Example 37
def transform(self, X):
    """Perform Box Cox transform on input.

    Args:
        X (:obj:`numpy.ndarray`): X data

    Returns:
        :obj:`numpy.ndarray`: Transformed data

    """
    data = check_array(X, copy=True)
    # Requires fit() to have stored the shift and lambda first.
    check_is_fitted(self, ["translate_", "lambda_"])
    return boxcox(data + 1 + self.translate_, self.lambda_)
Example 38
def box_cox(table):
    """
    box-cox transform table (row by row), returning lists of transformed values
    """
    from scipy.stats import boxcox as bc
    out = []
    for row in table:
        # boxcox needs strictly positive data: when the row contains a
        # zero, shift everything by a tiny fraction of the smallest
        # non-zero entry; otherwise no shift is needed.
        if min(row) == 0:
            shift = min([value for value in row if value != 0]) * 10e-10
        else:
            shift = 0
        shifted = np.array([value + shift for value in row])
        out.append(np.ndarray.tolist(bc(shifted)[0]))
    return out
Example 39
```def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that.  boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12) ```
Example 40
```def test_gh_6873(self):
# Regression test for gh-6873.
# This example was taken from gh-7534, a duplicate of gh-6873.
data = [198.0, 233.0, 233.0, 392.0]
llf = stats.boxcox_llf(-8, data)
# The expected value was computed with mpmath.
assert_allclose(llf, -17.93934208579061)

# This is the data from github user Qukaiyi, given as an example
# of a data set that caused boxcox to fail. ```
Example 41
```def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)

xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))

# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x)) ```
Example 42
```def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)

assert_almost_equal(maxlog, -1 / lmbda, decimal=2) ```
Example 43
```def test_empty(self):
assert_(stats.boxcox([]).shape == (0,)) ```
Example 44
def test_gh_6873(self):
    """Regression test for gh-6873: lambda estimate on a pathological data set."""
    _, lam = stats.boxcox(_boxcox_data)
    # The expected value of lam was computed with the function
    # powerTransform in the R library 'car'; trusted to only about
    # five significant digits.
    assert_allclose(lam, -0.051654, rtol=1e-5)
Example 45
```def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)

# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog) ```
Example 46
```def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that.  boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12) ```
Example 47
```def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)

xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))

# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x)) ```
Example 48
```def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)

assert_almost_equal(maxlog, -1 / lmbda, decimal=2) ```
Example 49
```def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x) ```
Example 50
```def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)

# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog) ```
Example 51
def box_cox_func(a):
    """Box-Cox transform *a* (lambda estimated by MLE), returned as a Series."""
    transformed, _ = boxcox(a)
    return Series(transformed)
Example 52
```def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that.  boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12) ```
Example 53
```def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)

xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))

# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x)) ```
Example 54
```def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)

assert_almost_equal(maxlog, -1 / lmbda, decimal=2) ```
Example 55
```def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x) ```
Example 56
```def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)

# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog) ```
Example 57
def _bc_est_lam(y, min_value, dtype, suppress_warnings):
    """Estimate the lambda param for box-cox transformations.

    Estimate lambda for a single y, given a range of lambdas
    through which to search. No validation performed.

    Parameters
    ----------
    y : np.ndarray, shape (n_samples,)
        The vector from which lambda is being estimated
    """
    # Coerce to an array of the requested dtype and floor at min_value.
    floored = np.maximum(np.asarray(y).astype(dtype), min_value)

    # scipy's log-likelihood estimator; the inner optimization routine is
    # wrapped so its rather verbose output can be silenced.
    @suppress
    def _boxcox_inner():
        return boxcox(floored, lmbda=None)

    # NOTE(review): @suppress is already applied unconditionally above and
    # is applied a second time here when requested — presumably the
    # project's `suppress` helper is idempotent; confirm.
    if suppress_warnings:
        _boxcox_inner = suppress(_boxcox_inner)

    # boxcox returns (transformed, lmbda); the MLE lambda is element 1.
    result = _boxcox_inner()
    return result[1]
Example 58
 Project: senior-design   Author: james-tate   File: test_morestats.py    GNU General Public License v2.0 5 votes
def test_boxcox_bad_arg():
    """Raise ValueError if any data value is negative."""
    negative = np.array([-1])
    assert_raises(ValueError, stats.boxcox, negative)
Example 59
 Project: Charm   Author: UCSBarchlab   File: boxcox.py    BSD 3-Clause "New" or "Revised" License 5 votes
def transform(samples, shape):
    """Box-Cox transform *samples* using the fixed exponent *shape*."""
    data = np.asarray(samples)
    return boxcox(data, shape)
Example 60
 Project: SourceFilterContoursMelody   Author: juanjobosch   File: mv_gaussian.py    GNU General Public License v3.0 5 votes
def transform_features(x_train, x_test):
    """Box-Cox transform features, keeping only the first six columns
    (this removes the vibrato features). The optimal lambda is computed
    per feature on the training set and the same lambda is then applied
    to the testing set.

    Parameters
    ----------
    x_train : np.array [n_samples, n_features]
        Untransformed training features.
    x_test : np.array [n_samples, n_features]
        Untransformed testing features.

    Returns
    -------
    x_train_boxcox : np.array [n_samples, n_features_trans]
        Transformed training features.
    x_test_boxcox : np.array [n_samples, n_features_trans]
        Transformed testing features.
    """
    # Drop the vibrato features (columns 6 and beyond).
    train = x_train[:, 0:6]
    test = x_test[:, 0:6]
    n_feats = train.shape[1]

    eps = 1.0  # shift features away from zero: boxcox needs positive data

    # Estimate one lambda per training column.
    train_bc = np.zeros(train.shape)
    lambdas = np.zeros((n_feats,))
    for col in range(n_feats):
        train_bc[:, col], lambdas[col] = boxcox(train[:, col] + eps)

    # Re-apply the training lambdas to the test columns.
    test_bc = np.zeros(test.shape)
    for col in range(n_feats):
        test_bc[:, col] = boxcox(test[:, col] + eps, lmbda=lambdas[col])

    return train_bc, test_bc