# Python numpy.fill_diagonal() Examples

The following are 30 code examples of numpy.fill_diagonal(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
Example #1
def _calc_score(self, X):
    """Score the sampling plan X according to ``self.criterion``.

    Supported string criteria: "maxmin" (largest smallest pairwise
    distance, diagonal excluded) and "correlation" (negative sum of
    squared entries of the strictly lower triangle of the correlation
    matrix). A callable criterion is applied to X directly.
    """
    crit = self.criterion
    if isinstance(crit, str):
        if crit == "maxmin":
            pairwise = cdist(X, X)
            # Mask the zero self-distances so they never win the minimum.
            np.fill_diagonal(pairwise, np.inf)
            return np.min(pairwise)
        if crit == "correlation":
            corr = np.corrcoef(X.T, rowvar=True)
            return -np.sum(np.tril(corr, -1) ** 2)
        raise Exception("Unknown criterion.")
    if callable(crit):
        return crit(X)
    raise Exception("Either provide a str or a function as a criterion!")
Example #2
def geometric_mean_var(z):
    """Variance over points of the geometric mean of each point's
    (n_dim - 1) smallest distances to the other points.

    Unit vectors (rows of the identity) are appended to z when not
    already present before distances are computed.
    """
    for unit_row in np.eye(z.shape[1]):
        if not np.any(np.all(unit_row == z, axis=1)):
            # np.row_stack was removed in NumPy 2.0; np.vstack is the
            # long-standing equivalent.
            z = np.vstack([z, unit_row])
    n_points, n_dim = z.shape

    D = vectorized_cdist(z, z)
    # Exclude self-distances from the nearest-neighbour search.
    np.fill_diagonal(D, np.inf)

    k = n_dim - 1
    nearest = D.argsort(axis=1)[:, :k]
    rows = np.column_stack([np.arange(n_points)] * k)

    # Geometric mean of each point's k smallest distances.
    return gmean(D[rows, nearest], axis=1).var()
Example #3
def pyeeg_samp_entropy(X, M, R):
    """Sample entropy of the series X (pyeeg-style vectorized version).

    M is the template length and R the tolerance: counts in-range
    template matches of length M and M+1 and returns the log of the
    ratio of the two counts.
    """
    N = len(X)

    # Length-M embedded subsequences (the last one is dropped).
    templates = pyeeg_embed_seq(X, 1, M)[:-1]
    stacked = np.tile(templates, (len(templates), 1, 1))
    # pair_diff[i, j, k] = |Em[i][k] - Em[j][k]|
    pair_diff = np.abs(stacked - np.transpose(stacked, [1, 0, 2]))
    in_range = np.max(pair_diff, axis=2) <= R
    np.fill_diagonal(in_range, 0)  # Don't count self-matches

    # In-range match count for each length-M template.
    match_m = in_range.sum(axis=0)

    # Extend the comparison by one more sample for the (M+1)-matches.
    tails = np.tile(X[M:], (N - M, 1))
    tail_diff = np.abs(tails - tails.T)
    match_m1 = np.logical_and(tail_diff <= R, in_range).sum(axis=0)

    # The tiny constant avoids taking log(0).
    return np.log(np.sum(match_m + 1e-100) / np.sum(match_m1 + 1e-100))

# =============================================================================
# Entropy
# =============================================================================
Example #4
```def _fit(self):
"""Compute a node-similarity matrix by dense matrix inversion.

NOTE(review): this excerpt is truncated -- `aux` is never defined
here. Judging by the commented-out sparse variant below, aux is
presumably I - beta * A.T built from the graph adjacency; confirm
against the full source file.
"""

# Versions using sparse matrices
# ident = sparse.identity(len(self._G.nodes)).tocsc()
# sim = inv(ident - adj.multiply(self.beta).T) - ident
# aux.setdiag(1+aux.diagonal(), k=0)
# sim = inv(aux)
# sim.setdiag(sim.diagonal()-1)
# print(sim.nnz)

# Version using dense matrices
# Add 1 to the diagonal, invert, then subtract the 1 back off the
# diagonal of the inverse (mirrors the sparse setdiag calls above).
np.fill_diagonal(aux, 1+aux.diagonal())
sim = np.linalg.inv(aux)
np.fill_diagonal(sim, sim.diagonal()-1)
return sim ```
Example #5
```def generate_neg_edges(G, testing_edges_num, seed=42):
"""Sample `testing_edges_num` negative (non-existing) edges of graph G.

NOTE(review): this excerpt is truncated -- `original_graph` is never
defined here; presumably it is the dense adjacency matrix of G built
just above. Confirm against the full source file.

Returns a list of (src, dst) index tuples.
"""
nnodes = len(G.nodes)
# Make a full graph (matrix of 1)
negG = np.ones((nnodes, nnodes))
# Exclude self-loops from the candidate set.
np.fill_diagonal(negG, 0.)
# Substract existing edges from full graph
# generates negative graph
negG = negG - original_graph
# get negative edges (nonzero entries)
neg_edges = np.where(negG > 0)
random.seed(seed) # replicability!
rng_edges = random.sample(range(neg_edges[0].size), testing_edges_num)
# return edges in (src, dst) tuple format
return list(zip(
neg_edges[0][rng_edges],
neg_edges[1][rng_edges]
)) ```
Example #6
def add_noise(cor, epsilon=None, m=None):
    """Add noise of magnitude `epsilon` (spread over `m` components) to
    the correlation matrix `cor`; accepts DataFrame/ndarray/array-like.
    """
    if isinstance(cor, pd.DataFrame):
        cor = cor.values
    elif not isinstance(cor, np.ndarray):
        cor = np.array(cor)

    n = cor.shape[1]

    # Default noise magnitude and number of noise components.
    epsilon = 0.05 if epsilon is None else epsilon
    m = 2 if m is None else m

    # Shrink the diagonal to leave room for the noise.
    np.fill_diagonal(cor, 1 - epsilon)

    return SimulateCorrelationMatrix._generate_noise(cor, n, m, epsilon)
Example #7
def constant(self):
    """Block-constant correlation matrix: rho[i] within cluster i, a
    common small off-block value, then a noisy 1 - epsilon diagonal."""
    delta = np.min(self.rho) - 0.01
    cormat = np.full((self.nkdim, self.nkdim), delta)

    epsilon = 0.99 - np.max(self.rho)
    for i in np.arange(self.k):
        cluster = np.full((self.nk[i], self.nk[i]), self.rho[i])
        if i == 0:
            cormat[0:self.nk[0], 0:self.nk[0]] = cluster
        else:
            lo = np.sum(self.nk[0:i])
            hi = np.sum(self.nk[0:i + 1])
            cormat[lo:hi, lo:hi] = cluster

    np.fill_diagonal(cormat, 1 - epsilon)

    return self._generate_noise(cormat, self.nkdim, self.M, epsilon)
Example #8
def toepz(self):
    """Block-Toeplitz correlation matrix: within cluster i entry (a, b)
    is rho[i]**|a - b|, then a noisy 1 - epsilon diagonal."""
    cormat = np.zeros((self.nkdim, self.nkdim))

    epsilon = (1 - np.max(self.rho)) / (1 + np.max(self.rho)) - .01

    for i in np.arange(self.k):
        # First column [1, rho, rho**2, ...] defines the Toeplitz block.
        first_col = np.insert(np.power(self.rho[i], np.arange(1, self.nk[i])), 0, 1)
        cluster = toeplitz(first_col)
        if i == 0:
            cormat[0:self.nk[0], 0:self.nk[0]] = cluster
        else:
            lo = np.sum(self.nk[0:i])
            hi = np.sum(self.nk[0:i + 1])
            cormat[lo:hi, lo:hi] = cluster

    np.fill_diagonal(cormat, 1 - epsilon)

    return self._generate_noise(cormat, self.nkdim, self.M, epsilon)
Example #9
def hub(self):
    """Hub-structured block correlation matrix; each cluster's first row
    decays from rho[i, 0] to rho[i, 1] (see self._fill_hub_matrix)."""
    cormat = np.zeros((self.nkdim, self.nkdim))

    for i in np.arange(self.k):
        cluster = toeplitz(self._fill_hub_matrix(
            self.rho[i, 0], self.rho[i, 1], self.power, self.nk[i]))
        if i == 0:
            cormat[0:self.nk[0], 0:self.nk[0]] = cluster
        else:
            lo = np.sum(self.nk[0:i])
            hi = np.sum(self.nk[0:i + 1])
            cormat[lo:hi, lo:hi] = cluster
        # tau is computed but unused below (epsilon is hard-coded).
        tau = (np.max(self.rho[i]) - np.min(self.rho[i])) / (self.nk[i] - 2)

    epsilon = 0.08  # (1 - np.min(rho) - 0.75 * np.min(tau)) - 0.01

    np.fill_diagonal(cormat, 1 - epsilon)

    return self._generate_noise(cormat, self.nkdim, self.M, epsilon)
Example #10
```def grad(self, inp, cost_grad):
"""
Notes
-----
The gradient is currently implemented for matrices only.

NOTE(review): this excerpt is truncated -- `grad` (presumably the
incoming output gradient) and `wr_val` (the gradient w.r.t. the fill
value) are referenced but never defined here; see the full Theano
source for the missing lines.
"""
a, val = inp
# Complex gradients are not supported.
if (a.dtype.startswith('complex')):
return [None, None]
elif a.ndim > 2:
raise NotImplementedError('%s: gradient is currently implemented'
' for matrices only' %
self.__class__.__name__)
wr_a = fill_diagonal(grad, 0)  # valid for any number of dimensions
# diag is only valid for matrices
return [wr_a, wr_val] ```
Example #11
def test_perform(self):
    """fill_diagonal writes the scalar on the diagonal of 2d and 3d tensors."""
    x = tensor.matrix()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    for shape in ((8, 8), (5, 8), (8, 5)):
        data = numpy.random.rand(*shape).astype(config.floatX)
        fill_value = numpy.cast[config.floatX](numpy.random.rand())
        result = f(data, fill_value)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert numpy.allclose(numpy.diag(result), fill_value)
        assert (result == fill_value).sum() == min(data.shape)

    # test for 3d tensor
    data = numpy.random.rand(3, 3, 3).astype(config.floatX)
    x = tensor.tensor3()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    # Offset the value by 10 so it cannot collide with the random data.
    fill_value = numpy.cast[config.floatX](numpy.random.rand() + 10)
    result = f(data, fill_value)
    # We can't use numpy.fill_diagonal as it is bugged.
    assert result[0, 0, 0] == fill_value
    assert result[1, 1, 1] == fill_value
    assert result[2, 2, 2] == fill_value
    assert (result == fill_value).sum() == min(data.shape)
Example #12
def test_perform(self):
    """fill_diagonal_offset writes the scalar along the requested diagonal."""
    x = tensor.matrix()
    y = tensor.scalar()
    z = tensor.iscalar()

    f = function([x, y, z], fill_diagonal_offset(x, y, z))
    for offset in (-5, -4, -1, 0, 1, 4, 5):
        for shape in ((8, 8), (5, 8), (8, 5), (5, 5)):
            data = numpy.random.rand(*shape).astype(config.floatX)
            fill_value = numpy.cast[config.floatX](numpy.random.rand())
            result = f(data, fill_value, offset)
            # We can't use numpy.fill_diagonal as it is bugged.
            assert numpy.allclose(numpy.diag(result, offset), fill_value)
            # Expected length of the offset diagonal of an (m, n) matrix.
            if offset >= 0:
                expected = min(min(data.shape), data.shape[1] - offset)
            else:
                expected = min(min(data.shape), data.shape[0] + offset)
            assert (result == fill_value).sum() == expected
Example #13
def perform(self, node, inputs, outputs):
    """Gradient of the matrix exponential via the eigendecomposition.

    Kalbfleisch and Lawless, J. Am. Stat. Assoc. 80 (1985) Equation 3.4
    Kind of... You need to do some algebra from there to arrive at
    this expression.
    """
    A, gA = inputs
    (out,) = outputs
    eigvals, V = scipy.linalg.eig(A, right=True)
    U = scipy.linalg.inv(V).T

    exp_eig = numpy.exp(eigvals)
    # Divided differences of exp over eigenvalue pairs; the 0/0 entries
    # on the diagonal are replaced by exp(w) itself.
    ratios = numpy.subtract.outer(exp_eig, exp_eig) / numpy.subtract.outer(eigvals, eigvals)
    numpy.fill_diagonal(ratios, exp_eig)
    result = U.dot(V.T.dot(gA).dot(U) * ratios).dot(V.T)

    with warnings.catch_warnings():
        # astype drops the imaginary part; silence the warning about it.
        warnings.simplefilter("ignore", numpy.ComplexWarning)
        out[0] = result.astype(A.dtype)
Example #14
def test_cov_nearest_factor_homog_sparse(self):
    """Dense and sparse inputs give (nearly) the same factor fit."""
    d = 100

    for n_factors in 1, 2:

        # Construct a test matrix with exact factor structure
        loadings = np.zeros((d, n_factors), dtype=np.float64)
        grid = np.linspace(0, 2*np.pi, d)
        for j in range(n_factors):
            loadings[:, j] = np.sin(grid*(j+1))
        mat = np.dot(loadings, loadings.T)
        # Strengthen the diagonal.
        np.fill_diagonal(mat, np.diag(mat) + 3.1)

        # Fit to dense
        rslt = cov_nearest_factor_homog(mat, n_factors)
        mat1 = rslt.to_matrix()

        # Fit to sparse
        smat = sparse.csr_matrix(mat)
        rslt = cov_nearest_factor_homog(smat, n_factors)
        mat2 = rslt.to_matrix()

        assert_allclose(mat1, mat2, rtol=0.25, atol=1e-3)
Example #15
def __init__(self, one_body, two_body, constant=0.):
    """Store a Hamiltonian given by one- and two-body tensors.

    The diagonal of `two_body` is folded into `one_body` (both input
    arrays are modified in place) and the `two_body` diagonal is zeroed.

    Raises:
        ValueError: if two_body is not of float dtype, two_body is not
            symmetric, or one_body is not Hermitian.
    """
    # `numpy.float` was a deprecated alias of the builtin `float`
    # (removed in NumPy 1.24); use `float` directly — same dtype check,
    # same error message, no crash on modern NumPy.
    if two_body.dtype != float:
        raise ValueError('Two-body tensor has invalid dtype. Expected {} '
                         'but was {}'.format(float, two_body.dtype))
    if not numpy.allclose(two_body, two_body.T):
        raise ValueError('Two-body tensor must be symmetric.')
    if not numpy.allclose(one_body, one_body.T.conj()):
        raise ValueError('One-body tensor must be Hermitian.')

    # Move the diagonal of two_body to one_body
    diag_indices = numpy.diag_indices(one_body.shape[0])
    one_body[diag_indices] += two_body[diag_indices]
    numpy.fill_diagonal(two_body, 0.)

    self.one_body = one_body
    self.two_body = two_body
    self.constant = constant
Example #16
```def LagrangeGaussLobatto(C,xi):
"""Evaluate 1D Lagrange shape functions of polynomial order C+1 and
their derivatives at the point xi.

NOTE(review): this excerpt is truncated -- `eps` (the nodal
coordinates, judging by the Vandermonde construction and the fact
that it is returned) is never defined here; confirm against the full
source file.

Returns (N, dN, eps): shape-function values, derivatives, and nodes.
"""

n = C+2
ranger = np.arange(n)

# Vandermonde matrix of monomials evaluated at the nodes eps.
A = np.zeros((n,n))
A[:,0] = 1.
for i in range(1,n):
A[:,i] = eps**i
# A1[:,1:] = np.array([eps**i for i in range(1,n)]).T[0,:,:]

# Solve A * coeff = I: column j of coeff holds the monomial
# coefficients of the j-th Lagrange basis polynomial.
RHS = np.zeros((n,n))
np.fill_diagonal(RHS,1)
coeff = np.linalg.solve(A,RHS)
xis = np.ones(n)*xi**ranger
N = np.dot(coeff.T,xis)
# dN = np.einsum('i,ij,i',1+ranger[:-1],coeff[1:,:],xis[:-1])
dN = np.dot(coeff[1:,:].T,xis[:-1]*(1+ranger[:-1]))

return (N,dN,eps) ```
Example #17
def sp_to_adjency(C, threshinf=0.2, threshsup=1.8):
    """Threshold the structure matrix C into a binary adjacency matrix.

    The diagonal of C is zeroed first (without mutating the input); an
    entry whose value clips into [threshinf, threshsup) becomes 1, and
    an entry that clips to exactly threshsup becomes 0.

    Parameters
    ----------
    C : ndarray, shape (n_nodes, n_nodes)
        The structure matrix to threshold.
    threshinf : float
        Lower clipping bound.
    threshsup : float
        Upper clipping bound; values reaching it are mapped to 0.

    Returns
    -------
    ndarray, shape (n_nodes, n_nodes)
        Matrix with each element in {0, 1}.
    """
    # Subtract the diagonal part so self-distances are zeroed out.
    off_diag = C - np.diag(np.diagonal(C))
    clipped = np.clip(off_diag, threshinf, threshsup)
    clipped[clipped == threshsup] = 0
    clipped[clipped != 0] = 1

    return clipped
Example #18
```def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""

if Y is None:
Y = X
X, Y, dtype = _return_float_dtype(X, Y)

# Single job: no parallel machinery needed.
if effective_n_jobs(n_jobs) == 1:
return func(X, Y, **kwds)

# NOTE(review): truncated excerpt -- the `Parallel(...)` call that
# consumes `fd` and fills `ret` is missing here; the dangling
# `for s in gen_even_slices(...))` below belonged to its generator
# argument. See sklearn.metrics.pairwise for the full version.
fd = delayed(_dist_wrapper)
ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order='F')
fd(func, ret, s, X, Y[s], **kwds)
for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs)))

if (X is Y or Y is None) and func is euclidean_distances:
# zeroing diagonal for euclidean norm.
# TODO: do it also for other norms.
np.fill_diagonal(ret, 0)

return ret ```
Example #19
```def test_gradient():
# Test gradient of Kullback-Leibler divergence.
# NOTE(review): truncated excerpt -- the wrapper around the two
# `_kl_divergence` calls and the final numeric-gradient assertion are
# partly missing; the bare second `return` and the dangling
# `decimal=5)` below are remnants. See sklearn's t-SNE tests for the
# full version.
random_state = check_random_state(0)

n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0

# Symmetric non-negative pseudo-distance matrix with zero diagonal.
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)

P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)

def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

decimal=5) ```
Example #20
```def test_small_distance_threshold():
# NOTE(review): truncated excerpt -- the AgglomerativeClustering(...)
# call below is never closed (its remaining keyword arguments and the
# `.fit(X)` call are missing). See sklearn's hierarchical clustering
# tests for the full version.
rng = np.random.RandomState(0)
n_samples = 10
X = rng.randint(-300, 300, size=(n_samples, 3))
# this should result in all data in their own clusters, given that
# their pairwise distances are bigger than .1 (which may not be the case
# with a different random seed).
clustering = AgglomerativeClustering(
n_clusters=None,
distance_threshold=1.,
# check that the pairwise distances are indeed all larger than .1
all_distances = pairwise_distances(X, metric='minkowski', p=2)
np.fill_diagonal(all_distances, np.inf)
assert np.all(all_distances > .1)
assert clustering.n_clusters_ == n_samples ```
Example #21
```def test_cluster_distances_with_distance_threshold():
# NOTE(review): truncated excerpt -- the AgglomerativeClustering(...)
# call is never closed, and the dangling `.min(axis=0).max())` /
# `.min(axis=0).min())` lines below are remnants of the statements
# that computed max_in_cluster_distance and min_out_cluster_distance.
# See sklearn's hierarchical clustering tests for the full version.
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randint(-10, 10, size=(n_samples, 3))
# check the distances within the clusters and with other clusters
distance_threshold = 4
clustering = AgglomerativeClustering(
n_clusters=None,
distance_threshold=distance_threshold,
labels = clustering.labels_
D = pairwise_distances(X, metric="minkowski", p=2)
# to avoid taking the 0 diagonal in min()
np.fill_diagonal(D, np.inf)
for label in np.unique(labels):
.min(axis=0).max())
.min(axis=0).min())
# single data point clusters only have that inf diagonal here
assert max_in_cluster_distance < distance_threshold
assert min_out_cluster_distance >= distance_threshold ```
Example #22
def expected_p(self):
    """
    Expected probability of a connection, marginalizing over the latent
    class assignments c (mean-field estimate).
    :return: KxK matrix of probabilities
    """
    if self.fixed:
        return self.P

    prob = np.zeros((self.K, self.K))
    for c1, c2 in np.ndindex(self.C, self.C):
        # Joint probability that the row node is in class c1 and the
        # column node is in class c2.
        joint = self.mf_m[:, c1][:, None] * self.mf_m[:, c2][None, :]
        # Posterior mean of the Beta(tau1, tau0) connection probability.
        prob += joint * self.mf_tau1[c1, c2] / (self.mf_tau0[c1, c2] + self.mf_tau1[c1, c2])

    if not self.allow_self_connections:
        np.fill_diagonal(prob, 0.0)

    return prob
Example #23
def expected_log_p(self):
    """
    Expected log probability of a connection, marginalizing over the
    latent class assignments c (mean-field estimate).
    :return: KxK matrix
    """
    if self.fixed:
        log_p = np.log(self.P)
    else:
        log_p = np.zeros((self.K, self.K))
        for c1, c2 in np.ndindex(self.C, self.C):
            # Joint probability that the row node is in class c1 and the
            # column node is in class c2.
            joint = self.mf_m[:, c1][:, None] * self.mf_m[:, c2][None, :]
            # E[ln p] under Beta(tau1, tau0): psi(tau1) - psi(tau0 + tau1).
            log_p += joint * (psi(self.mf_tau1[c1, c2])
                              - psi(self.mf_tau0[c1, c2] + self.mf_tau1[c1, c2]))

    if not self.allow_self_connections:
        np.fill_diagonal(log_p, -np.inf)

    return log_p
Example #24
def expected_log_notp(self):
    """
    Expected log probability of NO connection, marginalizing over the
    latent class assignments c (mean-field estimate).
    :return: KxK matrix
    """
    if self.fixed:
        log_notp = np.log(1.0 - self.P)
    else:
        log_notp = np.zeros((self.K, self.K))
        for c1, c2 in np.ndindex(self.C, self.C):
            # Joint probability that the row node is in class c1 and the
            # column node is in class c2.
            joint = self.mf_m[:, c1][:, None] * self.mf_m[:, c2][None, :]
            # E[ln(1-p)] under Beta(tau1, tau0): psi(tau0) - psi(tau0 + tau1).
            log_notp += joint * (psi(self.mf_tau0[c1, c2])
                                 - psi(self.mf_tau0[c1, c2] + self.mf_tau1[c1, c2]))

    if not self.allow_self_connections:
        np.fill_diagonal(log_notp, 0.0)

    return log_notp
Example #25
```def test_atlas_connectivity(betaseries_file, atlas_file, atlas_lut):
# NOTE(review): truncated excerpt -- `bs_data`, `atlas_lut_df` and
# `output_zcorr_df` are never defined here, and the dangling
# `na_values=...` lines below are remnants of a pd.read_csv(...) call
# that loaded the produced correlation matrix. Confirm against the
# full source file.

# expected output
pcorr = np.corrcoef(bs_data.squeeze())
# Self-correlations are not meaningful; blank out the diagonal.
np.fill_diagonal(pcorr, np.NaN)
regions = atlas_lut_df['regions'].values
pcorr_df = pd.DataFrame(pcorr, index=regions, columns=regions)
# Fisher r-to-z transform of the correlations.
expected_zcorr_df = pcorr_df.apply(lambda x: (np.log(1 + x) - np.log(1 - x)) * 0.5)

# run instance of AtlasConnectivity
ac = AtlasConnectivity(timeseries_file=str(betaseries_file),
atlas_file=str(atlas_file),
atlas_lut=str(atlas_lut))

res = ac.run()

na_values='n/a',
delimiter='\t',
index_col=0)

os.remove(res.outputs.correlation_matrix)
# test equality of the matrices up to 3 decimals
pd.testing.assert_frame_equal(output_zcorr_df, expected_zcorr_df,
check_less_precise=3) ```
Example #26
```def calc_potential_energy_with_grad(x, d, return_mutual_dist=False):
# NOTE(review): truncated excerpt -- `ret` is never initialized before
# the append/return below; presumably ret = [log_energy, grad] was
# built from the values computed here. Confirm against the full
# source file.
diff = (x[:, None] - x[None, :])
# calculate the squared euclidean from each point to another
dist = np.sqrt((diff ** 2).sum(axis=2))

# make sure the distance to itself does not count
np.fill_diagonal(dist, np.inf)

# epsilon which is the smallest distance possible to avoid an overflow during gradient calculation
eps = 10 ** (-320 / (d + 2))
b = dist < eps
dist[b] = eps

# select only upper triangular matrix to have each mutual distance once
mutual_dist = dist[np.triu_indices(len(x), 1)]

# calculate the energy by summing up the squared distances
energy = (1 / mutual_dist ** d).sum()
log_energy = - np.log(len(mutual_dist)) + np.log(energy)

grad = (-d * diff) / (dist ** (d + 2))[..., None]

if return_mutual_dist:
ret.append(mutual_dist)

return tuple(ret) ```
Example #27
def closest_point_variance(z):
    """Variance of each point's row-minimum distance to the other points.

    Unit vectors (rows of the identity) are appended to z when not
    already present before distances are computed.
    """
    for unit_row in np.eye(z.shape[1]):
        if not np.any(np.all(unit_row == z, axis=1)):
            # np.row_stack was removed in NumPy 2.0; np.vstack is the
            # long-standing equivalent.
            z = np.vstack([z, unit_row])

    D = vectorized_cdist(z, z)
    # Self-distances are set to 1 (not inf) before taking row minima,
    # matching the original criterion.
    np.fill_diagonal(D, 1)

    return D.min(axis=1).var()
Example #28
def closest_point_variance_mod(z):
    """Variance of the distance from each point to its k-th nearest
    other point, where k = ceil(sqrt(n_dim)).

    The shape is captured before the unit rows are appended.
    """
    n_points, n_dim = z.shape

    for unit_row in np.eye(z.shape[1]):
        if not np.any(np.all(unit_row == z, axis=1)):
            # np.row_stack was removed in NumPy 2.0; np.vstack is the
            # long-standing equivalent.
            z = np.vstack([z, unit_row])

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = int(np.ceil(np.sqrt(n_dim)))
    kth_idx = D.argsort(axis=1)[:, k - 1]

    # NOTE(review): if unit rows were appended above, arange(n_points)
    # and kth_idx have mismatched lengths -- presumably callers pass z
    # already containing the unit vectors; confirm upstream.
    return D[np.arange(n_points), kth_idx].var()
Example #29
def vectorized_cdist(A, B, func_dist=euclidean_distance, fill_diag_with_inf=False, **kwargs):
    """Pairwise distance matrix between rows of A and rows of B,
    computed by applying func_dist to all row pairs in one call.

    With fill_diag_with_inf=True the diagonal is set to infinity
    (useful when A is B and self-distances must be ignored).
    """
    lhs = np.repeat(A, B.shape[0], axis=0)   # each row of A repeated |B| times
    rhs = np.tile(B, (A.shape[0], 1))        # all of B stacked |A| times

    flat = func_dist(lhs, rhs, **kwargs)
    out = np.reshape(flat, (A.shape[0], B.shape[0]))

    if fill_diag_with_inf:
        np.fill_diagonal(out, np.inf)

    return out
Example #30
```def distance_of_closest_points_to_others(X):