# Python numpy.zeros_like() Examples

The following are 30 code examples for showing how to use numpy.zeros_like(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions and classes of the module numpy, or try the search function.

Example 1
def sgdmomentum(self, cost, params,constraints={}, lr=0.01,consider_constant=None, momentum=0.):
    """
    Stochastic gradient descent with momentum. Momentum has to be in [0, 1).

    NOTE(review): this snippet is truncated by the page scrape — `gradient`,
    `velocity` and `param` below come from a per-parameter loop whose header
    (and the returned update list) is missing; it will not run as shown.
    """
    # Check that the momentum is a correct value
    assert 0 <= momentum < 1

    # Wrap scalar hyper-parameters in theano shared variables so they can be
    # changed later without recompiling.
    lr = theano.shared(np.float32(lr).astype(floatX))
    momentum = theano.shared(np.float32(momentum).astype(floatX))

    # One zero-initialised velocity buffer per parameter, matching its shape.
    velocities = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    # Classic momentum step: v' = mu * v - lr * g ; p' = p + v'
    new_velocity = momentum * velocity - lr * gradient
    new_p=param+new_velocity;
    # apply constraints (per-parameter post-processing callables, if any)
    if param in constraints:
        c=constraints[param];
        new_p=c(new_p);
Example 2
def adagrad(self, cost, params, lr=1.0, epsilon=1e-6,consider_constant=None):
    """
    Adagrad update rule.

    NOTE(review): truncated snippet — `gsum` / `gradient` and the update-list
    construction are missing from the scraped source.
    """
    lr = theano.shared(np.float32(lr).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))

    # Per-parameter accumulator of squared gradients, zero-initialised.
    gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    # Accumulate squared gradient for the adaptive step size.
    new_gsum = gsum + gradient ** 2.
Example 3
def adadelta(self, cost, params, rho=0.95, epsilon=1e-6,consider_constant=None):
    """
    Adadelta update rule.

    NOTE(review): truncated snippet — `accu_delta`, `new_accu_gradient`,
    `gradient` and the update list are missing from the scraped source.
    """
    rho = theano.shared(np.float32(rho).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))

    # Running averages of squared gradients and of squared parameter deltas.
    accu_gradients = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
    accu_deltas = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    # Step scaled by the ratio RMS(previous deltas) / RMS(gradients),
    # then update the running average of squared deltas.
    delta_x = - T.sqrt((accu_delta + epsilon) / (new_accu_gradient + epsilon)) * gradient
    new_accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
Example 4
def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6,consider_constant=None):
    """
    RMSProp.

    NOTE(review): truncated snippet — `accumulator`, `gradient`, `param`
    and the returned update list come from lines lost in the scrape.
    """
    lr = theano.shared(np.float32(lr).astype(floatX))

    # Per-parameter running average of squared gradients.
    accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32)) for p in params]

    # Exponential moving average of g^2, then an RMS-normalised step.
    new_accumulator = rho * accumulator + (1 - rho) * gradient ** 2

    new_param = param - lr * gradient / T.sqrt(new_accumulator + eps)

Example 5
def data_augmentation(self, x_train):
    """
    Randomly crop (with padding) and horizontally flip each training image.

    NOTE(review): truncated snippet — the padding step that defines
    `pad_h`/`pad_w` and the actual crop/flip assignments into `aug_data`
    are missing from the scraped source; it will not run as shown.
    """
    _, c, h, w = x_train.shape  # assumes NCHW layout — TODO confirm with caller
    aug_data = np.zeros_like(x_train)
    for i, x in enumerate(x_train):

        # Randomly crop and horizontal flip the image
        top = np.random.randint(0, pad_h - h + 1)
        left = np.random.randint(0, pad_w - w + 1)
        bottom = top + h
        right = left + w
        # coin flip decides whether this sample is horizontally flipped
        if np.random.randint(0, 2):

    return aug_data
Example 6
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    Fit a logistic-regression detector on (density, uncertainty) features.

    Negative samples are labelled 0.0 and positive samples 1.0.

    :param densities_pos: density scores of positive samples
    :param densities_neg: density scores of negative samples
    :param uncerts_pos: uncertainty scores of positive samples
    :param uncerts_neg: uncertainty scores of negative samples
    :return: (feature matrix, label vector, fitted classifier)
    """
    def _as_features(densities, uncerts):
        # Stack the two score vectors as the columns of an (n, 2) matrix.
        stacked = np.concatenate(
            (densities.reshape((1, -1)), uncerts.reshape((1, -1))),
            axis=0)
        return stacked.transpose([1, 0])

    # Negatives first, positives second — must match the label order below.
    values = np.concatenate((_as_features(densities_neg, uncerts_neg),
                             _as_features(densities_pos, uncerts_pos)))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr
Example 7
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    Compute the ROC curve and AUC for negative (label 0) versus positive
    (label 1) detection scores, optionally plotting the curve.

    :param probs_neg: scores of negative samples
    :param probs_pos: scores of positive samples
    :param plot: when True, draw the ROC curve with matplotlib
    :return: (fpr, tpr, auc_score)
    """
    scores = np.concatenate((probs_neg, probs_pos))
    truth = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(truth, scores)
    auc_score = auc(fpr, tpr)

    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score
Example 8
def numerical_gradient(f, x):
    """
    Central-difference numerical gradient of ``f`` at the 1-D array ``x``.

    Fix: the scraped snippet lost the accumulator initialisation and the
    final return — restored ``grad = np.zeros_like(x)`` and ``return grad``.

    :param f: scalar function of the whole array ``x``
    :param x: 1-D float array; modified in place during probing, restored after
    :return: array of partial derivatives, same shape as ``x``
    """
    h = 1e-4
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        # evaluate f(x + h) in the idx-th coordinate
        x[idx] = tmp_val + h
        fxh1 = f(x)
        # evaluate f(x - h) in the idx-th coordinate
        x[idx] = tmp_val - h
        fxh2 = f(x)

        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value

    return grad
Example 9
```def _numerical_gradient_1d(f, x):
h = 1e-4  # 0.0001

for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x)  # f(x+h)

x[idx] = tmp_val - h
fxh2 = f(x)  # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2 * h)

x[idx] = tmp_val  # 还原值

Example 10
def numerical_gradient(f, x):
    """
    Central-difference numerical gradient of ``f`` at ``x`` for an array of
    any shape.

    Fix: the scraped snippet lost three lines — the ``grad`` accumulator,
    the ``np.nditer`` construction driving the ``while`` loop, and the
    final ``return grad``. All restored.

    :param f: scalar function of the whole array ``x``
    :param x: float array; modified in place during probing, restored after
    :return: array of partial derivatives, same shape as ``x``
    """
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    # nditer walks every element regardless of the array's rank.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()

    return grad

Example 11
def update(self, index, weight, grad, state):
    """
    Row-wise FTRL-Proximal update (MXNet optimizer).

    NOTE(review): truncated snippet — `all_zeros` and the `dn`/`n`
    accumulator updates are missing from the scraped source; it will not
    run as shown.
    """
    self._update_count(index)
    wd = self._get_wd(index)  # weight decay for this index
    lr = self._get_lr(index)  # learning rate for this index
    num_rows = weight.shape[0]

    dn, n = state  # optimizer state tensors — presumably FTRL's z-like term and sum of squared grads; verify upstream
    for row in range(num_rows):
        # Skip rows whose gradient is all zero when lazily updating sparse data.
        if all_zeros and self.lazy_update:
            continue

        #update dn, n

        # update weight: soft-threshold by lamda1, scale by the adaptive
        # per-row learning rate (beta + sqrt(n)) / lr + wd.
        weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \
            ((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1)
Example 12
def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
    """
    Solve A x = b by the conjugate-gradient method, with A available only
    through the matrix-vector product ``f_Ax``.

    Fix: ``xrange`` is Python 2 only — replaced with ``range``.

    :param f_Ax: callable returning A @ v for a vector v (A assumed SPD)
    :param b: right-hand-side vector (numpy array)
    :param cg_iters: maximum number of iterations
    :param residual_tol: stop once the squared residual norm drops below this
    :return: approximate solution x
    """
    p = b.copy()          # search direction
    r = b.copy()          # residual b - A x (x starts at zero)
    x = np.zeros_like(b)
    rdotr = r.dot(r)
    for _ in range(cg_iters):
        z = f_Ax(p)
        v = rdotr / p.dot(z)      # step length along p
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        mu = newrdotr / rdotr     # coefficient keeping directions conjugate
        p = r + mu * p
        rdotr = newrdotr
        if rdotr < residual_tol:
            break
    return x
Example 13
def count_super(p, m, counters, preds, labels, label_to_ch):
    """
    Accumulate per-channel sample counts and accuracy sums into ``counters``.

    For each distinct label: if ``label_to_ch[l]`` is non-empty the label is
    "known" and a prediction counts as correct when it equals any mapped
    channel; otherwise the label is "novel" and a negative prediction counts
    as correct (bucketed under channel -1). Data counts are only recorded
    when ``p == 0``.
    """
    for label in np.unique(labels):
        label_preds = preds[labels == label]
        channels = label_to_ch[label]

        if channels:
            # known ("in") label: credit every mapped channel
            hit = np.zeros_like(label_preds, dtype=bool)
            for ch in channels:
                if p == 0:
                    counters['data'][m][ch] += label_preds.shape[0]
                hit |= (label_preds == ch)
            n_hits = hit.sum()
            for ch in channels:
                counters['acc'][p, m][ch] += n_hits
        else:
            # novel ("out") label: negative predictions are correct
            if p == 0:
                counters['data'][m][-1] += label_preds.shape[0]
            counters['acc'][p, m][-1] += (label_preds < 0).sum()
Example 14
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
    """
    Evaluate the model one station at a time and record the mean val loss.

    Arrays are indexed as (sample, timestep, stationID, feature); the ten
    stations are sliced out one by one and fed to ``self.model.evaluate``.
    The mean loss is stored on ``self.current_mean_val_loss`` and appended
    to ``self.val_loss_list``.
    """
    station_losses = []
    for sid in range(10):
        loss = self.model.evaluate(
            x=[data_input_obs[:, :, sid, :],
               data_input_ruitu[:, :, sid, :],
               data_ids[:, :, sid],
               data_time],
            y=[data_labels[:, :, sid, :]],
            verbose=False)
        station_losses.append(loss)
        if each_station_display:
            print('\tFor station 9000{}, val loss: {}'.format(sid + 1, loss))

    self.current_mean_val_loss = np.mean(station_losses)
    print('Mean val loss:', self.current_mean_val_loss)

    self.val_loss_list.append(self.current_mean_val_loss)
Example 15
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
    """
    Evaluate the model one station at a time and record the mean MLE
    validation loss.

    Arrays are indexed as (sample, timestep, stationID, feature); the ten
    stations are sliced out one by one and fed to ``self.model.evaluate``.
    The mean loss is stored on ``self.current_mean_val_loss`` and appended
    to ``self.val_loss_list``.
    """
    station_losses = []
    for sid in range(10):
        loss = self.model.evaluate(
            x=[data_input_obs[:, :, sid, :],
               data_input_ruitu[:, :, sid, :],
               data_ids[:, :, sid],
               data_time],
            y=[data_labels[:, :, sid, :]],
            verbose=False)
        station_losses.append(loss)
        if each_station_display:
            print('\tFor station 9000{}, val MLE loss: {}'.format(sid + 1, loss))

    self.current_mean_val_loss = np.mean(station_losses)
    print('Mean val MLE loss:', self.current_mean_val_loss)

    self.val_loss_list.append(self.current_mean_val_loss)
Example 16
def _encode_observation(self, idx):
    """
    Return the frame-stacked observation ending at replay-buffer slot `idx`.

    Concatenates the last `self.frame_history_len` frames along axis 2,
    zero-padding at the front when fewer frames are available or when an
    episode boundary (`self.done`) cuts the history short.
    """
    end_idx   = idx + 1 # make noninclusive
    start_idx = end_idx - self.frame_history_len
    # this checks if we are using low-dimensional observations, such as RAM
    # state, in which case we just directly return the latest RAM.
    if len(self.obs.shape) == 2:
        return self.obs[end_idx-1]
    # if there weren't enough frames ever in the buffer for context
    if start_idx < 0 and self.num_in_buffer != self.size:
        start_idx = 0
    # advance start past any episode termination inside the window, so the
    # stack never crosses an episode boundary
    for idx in range(start_idx, end_idx - 1):
        if self.done[idx % self.size]:
            start_idx = idx + 1
    missing_context = self.frame_history_len - (end_idx - start_idx)
    # if zero padding is needed for missing context
    # or we are on the boundry of the buffer
    if start_idx < 0 or missing_context > 0:
        frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]
        for idx in range(start_idx, end_idx):
            frames.append(self.obs[idx % self.size])
        return np.concatenate(frames, 2)
    else:
        # this optimization has potential to saves about 30% compute time \o/
        # contiguous case: one transpose+reshape instead of per-frame copies
        img_h, img_w = self.obs.shape[1], self.obs.shape[2]
        return self.obs[start_idx:end_idx].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)
Example 17
```def _filter_arm_data(self, X, a, r, choice):
if self.assume_un:
this_choice = (a == choice)
arms_w_rew = (r > 0.)
yclass = np.where(arms_w_rew & (~this_choice), np.zeros_like(r), r)
this_choice = this_choice | arms_w_rew
yclass = yclass[this_choice]
else:
this_choice = (a == choice)
yclass = r[this_choice]

## Note: don't filter X here as in many cases it won't end up used
return yclass, this_choice

### TODO: these parallelizations probably shouldn't use sharedmem,
### but they still need to somehow modify the random states ```
Example 18
def mapLandmarksVec(p, s, m):
    """
    Linearly map values ``m`` from the landmark interval [p_1, p_2] onto
    the target interval [s_1, s_2], element-wise.

    Fixes: removed the unused local ``new_val`` and the dead commented-out
    code; behavior of the guard and the returned expression is unchanged.

    :param p: pair (p_1, p_2) of source landmark arrays
    :param s: pair (s_1, s_2) of target landmark arrays
    :param m: values to map
    :return: ((m - p_1) / (p_2 - p_1)) * (s_2 - s_1) + s_1
    """
    p_1, p_2 = p[0], p[1]
    s_1, s_2 = s[0], s[1]

    # Identical landmark positions would divide by zero below; bail out
    # loudly rather than emit NaNs (original author's guard).
    same_inds = (p_1 == p_2)
    if np.sum(same_inds):
        print('Fix this')
        sys.exit()

    return ((m-p_1) / (p_2-p_1) * (s_2 - s_1)) + s_1
Example 19
def _depth_montage(depths):
    """
    Colour-map a batch of depth maps with the jet palette and tile them
    into a single montage image.

    NOTE(review): `masks` is referenced but never defined in this snippet —
    lines were lost in the scrape; `imgutil` is an external helper module
    and `xrange` is Python 2 only. Will not run as shown.
    """
    if depths.ndim == 4:
        # collapse a singleton channel axis: (N, 1, H, W) -> (N, H, W)
        assert depths.shape[1] == 1
        depths = depths[:,0,:,:]
    #depths = imgutil.scale_values(depths, min=-2.5, max=2.5)
    #depths = map(imgutil.scale_values, depths)
    for i in xrange(len(depths)):
        x = depths[i]
        if len(x) == 0:
            d = np.zeros_like(depths[i])
        else:
            # normalise each map independently to its own min/max
            d = imgutil.scale_values(depths[i], min=x.min(), max=x.max())
        depths[i] = d
    depths = plt.cm.jet(depths)[...,:3]  # drop the alpha channel: RGBA -> RGB
    for i in xrange(len(depths)):
        for c in xrange(3):
            # grey out pixels outside the (externally supplied) masks
            depths[i, :, :, c][masks[i] == 0] = 0.2
    return imgutil.montage(depths, border=1)
Example 20
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6, grads=None):
    # From:
    # https://github.com/Newmu/Theano-Tutorials/blob/master/4_modern_net.py
    #
    # NOTE(review): truncated snippet — `updates` is never initialised and
    # the accumulator update / RMS-scaled step are missing; it will not run
    # as shown.

    for p, g in zip(params, grads):
        # Per-parameter running average of squared gradients.
        acc = theano.shared(np.zeros_like(p.get_value(), dtype=np.float32),
                            name="%s/rms/acc" % p.name)
        acc_new = rho * acc + (1 - rho) * g ** 2
        updates.append((p, p - lr * g))
Example 21
def conjugate_gradient(self, b):
    """
    Returns F^(-1)b where F is the Hessian of the KL divergence.

    ``b`` is a torch tensor; Hessian-vector products come from
    ``self.hessian_vector_product`` and the solution is accumulated as a
    numpy array.

    Fix: ``xrange`` is Python 2 only — replaced with ``range``.
    """
    p = b.clone().data    # search direction
    r = b.clone().data    # residual (x starts at zero)
    x = np.zeros_like(b.data.cpu().numpy())
    rdotr = r.double().dot(r.double())
    for _ in range(self.cg_iters):
        z = self.hessian_vector_product(Variable(p)).squeeze(0)
        v = rdotr / p.double().dot(z.double())   # step length along p
        x += v * p.cpu().numpy()
        r -= v * z
        newrdotr = r.double().dot(r.double())
        mu = newrdotr / rdotr                    # conjugacy coefficient
        p = r + mu * p
        rdotr = newrdotr
        if rdotr < self.residual_tol:
            break
    return x
Example 22
def attack(self, X, Y):
    """
    Perform the L_2 attack on the given images for the given targets.

    :param X: samples to generate advs
    :param Y: the original class labels (one-hot, shape (n, nb_classes))
    If self.targeted is true, then the targets represents the target labels.
    If self.targeted is false, then targets are the original class labels.

    NOTE(review): truncated snippet — the per-batch attack body below the
    final loop header is missing from the scraped source.
    """
    nb_classes = Y.shape[1]

    # random select target class for targeted attack
    y_target = np.copy(Y)
    if self.TARGETED:
        for i in range(Y.shape[0]):
            current = int(np.argmax(Y[i]))
            # pick any class other than the true one as the attack target
            target = np.random.choice(other_classes(nb_classes, current))
            y_target[i] = np.eye(nb_classes)[target]

    # iterate over batches; clamp the last batch end to the dataset size
    for i in tqdm(range(0, X.shape[0], self.batch_size)):
        start = i
        end = i + self.batch_size
        end = np.minimum(end, X.shape[0])

Example 23
def attack(self, X, Y):
    """
    Perform the L_2 attack on the given images for the given targets.

    :param X: samples to generate advs
    :param Y: the original class labels (one-hot, shape (n, nb_classes))
    If self.targeted is true, then the targets represents the target labels.
    If self.targeted is false, then targets are the original class labels.

    NOTE(review): truncated snippet — the per-batch attack body below the
    final loop header is missing from the scraped source.
    """
    nb_classes = Y.shape[1]

    # random select target class for targeted attack
    y_target = np.copy(Y)
    if self.TARGETED:
        for i in range(Y.shape[0]):
            current = int(np.argmax(Y[i]))
            # pick any class other than the true one as the attack target
            target = np.random.choice(other_classes(nb_classes, current))
            y_target[i] = np.eye(nb_classes)[target]

    # iterate over batches; clamp the last batch end to the dataset size
    for i in tqdm(range(0, X.shape[0], self.batch_size)):
        start = i
        end = i + self.batch_size
        end = np.minimum(end, X.shape[0])

Example 24
def attack(self, X, Y):
    """
    Perform the L_2 attack on the given images for the given targets.

    :param X: samples to generate advs
    :param Y: the original class labels (one-hot, shape (n, nb_classes))
    If self.targeted is true, then the targets represents the target labels.
    If self.targeted is false, then targets are the original class labels.

    NOTE(review): truncated snippet — the per-batch attack body below the
    final loop header is missing from the scraped source.
    """
    nb_classes = Y.shape[1]

    # random select target class for targeted attack
    y_target = np.copy(Y)
    if self.TARGETED:
        for i in range(Y.shape[0]):
            current = int(np.argmax(Y[i]))
            # pick any class other than the true one as the attack target
            target = np.random.choice(other_classes(nb_classes, current))
            y_target[i] = np.eye(nb_classes)[target]

    # iterate over batches; clamp the last batch end to the dataset size
    for i in tqdm(range(0, X.shape[0], self.batch_size)):
        start = i
        end = i + self.batch_size
        end = np.minimum(end, X.shape[0])

Example 25
Project: animal-tracking | Author: colinlaney | File: track.py | License: Creative Commons Zero v1.0 Universal | 5 votes
def drawFloorCrop(event, x, y, flags, params):
    """
    OpenCV mouse callback for drawing a 4-vertex floor-cropping polygon and
    deriving the perspective-transform matrix from it.

    :param event: cv2 mouse event code
    :param x: cursor x position
    :param y: cursor y position
    :param flags: cv2 event flags (unused here)
    :param params: dict with 'imgFloorCorners' (preview image) and
        'croppingPolygons' (per-video vertex arrays keyed by `name`)

    NOTE(review): indentation was flattened by the scrape; the nesting below
    is reconstructed from the control flow — verify against the original
    track.py before relying on it.
    """
    global perspectiveMatrix, name, RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        # right click: close this video's floor-corner window
        cv2.destroyWindow(f'Floor Corners for {name}')
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h = params['imgFloorCorners'].shape[0]
        # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h,0]

        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4,2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        tetragonVerticesUpd = np.float32([[0,0], [0,h], [h,h], [h,0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        # left press: restart the polygon after a completed one, or add a vertex
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0,0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x,y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x,y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        # drag preview: the last vertex follows the cursor
        params['croppingPolygons'][name][-1] = [x,y]
        if len(params['croppingPolygons'][name]) > 1:
            # shade the in-progress polygon over the preview image
            cv2.fillPoly(
                imgCroppingPolygon,
                [np.reshape(
                    params['croppingPolygons'][name],
                    (len(params['croppingPolygons'][name]),2)
                )],
                BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow(f'Floor Corners for {name}', imgCroppingPolygon)
Example 26
def update(self, params, grads):
    """
    SGD-with-momentum step: v <- momentum * v - lr * grad; param <- param + v.

    Velocity buffers are allocated lazily on the first call, one per
    parameter, with the parameter's shape.
    """
    if self.v is None:
        self.v = {key: np.zeros_like(value) for key, value in params.items()}

    for key in params.keys():
        velocity = self.momentum * self.v[key] - self.lr * grads[key]
        self.v[key] = velocity
        params[key] += velocity
Example 27
def update(self, params, grads):
    """
    Nesterov accelerated-gradient step.

    Fix: restored the missing line ``self.v[key] -= self.lr * grads[key]``
    (lost in the page scrape) — without it the velocity never incorporates
    the gradient and the update is wrong.
    """
    if self.v is None:
        # lazily allocate one zero velocity buffer per parameter
        self.v = {}
        for key, val in params.items():
            self.v[key] = np.zeros_like(val)

    for key in params.keys():
        self.v[key] *= self.momentum
        self.v[key] -= self.lr * grads[key]  # restored line
        params[key] += self.momentum * self.momentum * self.v[key]
        params[key] -= (1 + self.momentum) * self.lr * grads[key]
Example 28
def update(self, params, grads):
    """
    AdaGrad step: each parameter's learning rate is scaled by the inverse
    square root of its accumulated squared gradients.

    Fix: restored the missing accumulator line
    ``self.h[key] += grads[key] * grads[key]`` (lost in the page scrape) —
    without it ``h`` stays zero and the step divides by the 1e-7 guard alone.
    """
    if self.h is None:
        # lazily allocate one zero accumulator per parameter
        self.h = {}
        for key, val in params.items():
            self.h[key] = np.zeros_like(val)

    for key in params.keys():
        self.h[key] += grads[key] * grads[key]  # restored line
        # 1e-7 guards against division by zero on the first step
        params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)
Example 29
```def update(self, params, grads):
if self.h is None:
self.h = {}
for key, val in params.items():
self.h[key] = np.zeros_like(val)

for key in params.keys():
self.h[key] *= self.decay_rate
```def numerical_gradient_2d(f, X):