Python numpy.sum() Examples
The following are 30 code examples of numpy.sum(), collected from open-source projects. The source file, project, and license are noted above each example.
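
Before diving into project code, it helps to recall what numpy.sum() itself does. The short sketch below is not from any of the projects that follow; it is a minimal demonstration of the behaviors the examples lean on: summing everything, reducing along an axis, keeping the reduced axis, and counting True values in a boolean array.

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])

np.sum(a)                         # 21 -- sums every element
np.sum(a, axis=0)                 # array([5, 7, 9]) -- one sum per column
np.sum(a, axis=1)                 # array([ 6, 15]) -- one sum per row
np.sum(a, axis=1, keepdims=True)  # array([[ 6], [15]]) -- keep the reduced axis

# On boolean arrays, True counts as 1, so np.sum counts matches:
np.sum(a > 3)                     # 3 -- three elements exceed 3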

Example #1
Source File: tcpr.py From libTLDA with MIT License
def add_intercept(self, X):
    """Add 1's to data as last features."""
    # Data shape
    N, D = X.shape

    # Check if there's not already an intercept column
    if np.any(np.sum(X, axis=0) == N):

        # Report
        print('Intercept is not the last feature. Swapping..')

        # Find which column contains the intercept
        intercept_index = np.argwhere(np.sum(X, axis=0) == N)

        # Swap intercept to last
        X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

    # Add intercept as last column
    X = np.hstack((X, np.ones((N, 1))))

    # Append column of 1's to data, and increment dimensionality
    return X, D+1
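
The check above works because a column of 1's in an N-row matrix sums to exactly N, and comparing the column sums against N yields a boolean mask. A minimal sketch of the same test on a hypothetical 4x3 matrix:

import numpy as np

X = np.array([[2., 1., 5.],
              [3., 1., 0.],
              [7., 1., 1.],
              [9., 1., 2.]])
N, D = X.shape                       # N = 4 rows
np.sum(X, axis=0) == N               # array([False,  True, False])
np.argwhere(np.sum(X, axis=0) == N)  # array([[1]]) -- column 1 is the intercept

Note that the test is a heuristic: any column whose entries happen to sum to N would match, not only a column of 1's.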
Example #2
Source File: 5_nueral_network.py From deep-learning-note with MIT License
def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)

    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))

    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)

    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)

    J = J / m

    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:,1:], 2)) + np.sum(np.power(theta2[:,1:], 2)))

    return J
Example #3
Source File: 3_logistic_regression.py From deep-learning-note with MIT License
def gradientReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)

    error = sigmoid(X * theta.T) - y

    for i in range(parameters):
        term = np.multiply(error, X[:,i])

        if (i == 0):
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:,i])

    return grad
Example #4
Source File: 9_anomaly_and_rec.py From deep-learning-note with MIT License
def cost(params, Y, R, num_features):
    Y = np.matrix(Y)  # (1682, 943)
    R = np.matrix(R)  # (1682, 943)
    num_movies = Y.shape[0]
    num_users = Y.shape[1]

    # reshape the parameter array into parameter matrices
    X = np.matrix(np.reshape(params[:num_movies * num_features], (num_movies, num_features)))  # (1682, 10)
    Theta = np.matrix(np.reshape(params[num_movies * num_features:], (num_users, num_features)))  # (943, 10)

    # initializations
    J = 0

    # compute the cost
    error = np.multiply((X * Theta.T) - Y, R)  # (1682, 943)
    squared_error = np.power(error, 2)  # (1682, 943)
    J = (1. / 2) * np.sum(squared_error)

    return J
Example #5
Source File: test_bayestar.py From dustmaps with GNU General Public License v2.0
def test_bounds(self):
    """
    Test that out-of-bounds coordinates return NaN reddening, and that
    in-bounds coordinates do not return NaN reddening.
    """
    for mode in (['random_sample', 'random_sample_per_pix', 'median', 'samples', 'mean']):
        # Draw random coordinates, both above and below dec = -30 degree line
        n_pix = 1000
        ra = -180. + 360.*np.random.random(n_pix)
        dec = -75. + 90.*np.random.random(n_pix)    # 45 degrees above/below
        c = coords.SkyCoord(ra, dec, frame='icrs', unit='deg')

        ebv_calc = self._bayestar(c, mode=mode)

        nan_below = np.isnan(ebv_calc[dec < -35.])
        nan_above = np.isnan(ebv_calc[dec > -25.])
        pct_nan_above = np.sum(nan_above) / float(nan_above.size)

        # print r'{:s}: {:.5f}% nan above dec=-25 deg.'.format(mode, 100.*pct_nan_above)

        self.assertTrue(np.all(nan_below))
        self.assertTrue(pct_nan_above < 0.05)
Example #6
Source File: 9_anomaly_and_rec.py From deep-learning-note with MIT License
def select_threshold(pval, yval):
    best_epsilon = 0
    best_f1 = 0
    f1 = 0

    step = (pval.max() - pval.min()) / 1000

    for epsilon in np.arange(pval.min(), pval.max(), step):
        preds = pval < epsilon

        tp = np.sum(np.logical_and(preds == 1, yval == 1)).astype(float)
        fp = np.sum(np.logical_and(preds == 1, yval == 0)).astype(float)
        fn = np.sum(np.logical_and(preds == 0, yval == 1)).astype(float)

        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = (2 * precision * recall) / (precision + recall)

        if f1 > best_f1:
            best_f1 = f1
            best_epsilon = epsilon

    return best_epsilon, best_f1
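
The heart of this example is that np.sum() over a boolean array counts its True entries, which turns np.logical_and() masks directly into true-positive, false-positive, and false-negative counts. A self-contained sketch with made-up predictions and labels:

import numpy as np

preds = np.array([1, 1, 0, 0, 1])
yval  = np.array([1, 0, 0, 1, 1])

tp = np.sum(np.logical_and(preds == 1, yval == 1))  # 2
fp = np.sum(np.logical_and(preds == 1, yval == 0))  # 1
fn = np.sum(np.logical_and(preds == 0, yval == 1))  # 1

precision = tp / (tp + fp)                          # 0.667
recall = tp / (tp + fn)                             # 0.667
f1 = 2 * precision * recall / (precision + recall)  # 0.667

The .astype(float) casts in the original guard against integer division under Python 2; under Python 3 the divisions above already produce floats.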
Example #7
Source File: metrics.py From DDPAE-video-prediction with MIT License
def find_match(self, pred, gt):
    '''
    Match component to balls.
    '''
    batch_size, n_frames_input, n_components, _ = pred.shape
    diff = pred.reshape(batch_size, n_frames_input, n_components, 1, 2) - \
           gt.reshape(batch_size, n_frames_input, 1, n_components, 2)
    diff = np.sum(np.sum(diff ** 2, axis=-1), axis=1)
    # Direct indices
    indices = np.argmin(diff, axis=2)
    ambiguous = np.zeros(batch_size, dtype=np.int8)
    for i in range(batch_size):
        _, counts = np.unique(indices[i], return_counts=True)
        if not np.all(counts == 1):
            ambiguous[i] = 1
    return indices, ambiguous
Example #8
Source File: 5_nueral_network.py From deep-learning-note with MIT License
def cost0(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)

    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))

    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)

    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)

    J = J / m

    return J
Example #9
Source File: test_iphas.py From dustmaps with GNU General Public License v2.0
def test_bounds(self):
    """
    Test that out-of-bounds coordinates return NaN reddening, and that
    in-bounds coordinates do not return NaN reddening.
    """
    for mode in (['random_sample', 'random_sample_per_pix', 'median', 'samples', 'mean']):
        # Draw random coordinates on the sphere
        n_pix = 10000
        u, v = np.random.random((2, n_pix))
        l = 360. * u
        b = 90. - np.degrees(np.arccos(2.*v - 1.))
        c = coords.SkyCoord(l, b, frame='galactic', unit='deg')

        A_calc = self._iphas(c, mode=mode)

        in_bounds = (l > 32.) & (l < 213.) & (b < 4.5) & (b > -4.5)
        out_of_bounds = (l < 28.) | (l > 217.) | (b > 7.) | (b < -7.)

        n_nan_in_bounds = np.sum(np.isnan(A_calc[in_bounds]))
        n_finite_out_of_bounds = np.sum(np.isfinite(A_calc[out_of_bounds]))

        self.assertTrue(n_nan_in_bounds == 0)
        self.assertTrue(n_finite_out_of_bounds == 0)
Example #10
Source File: dynamic.py From StructEngPy with MIT License
def solve_modal(model, k: int):
    """
    Solve eigen modes of the MDOF system.

    params:
        model: FEModel.
        k: number of modes to extract.
    """
    K_, M_ = model.K_, model.M_
    if k > model.DOF:
        logger.info('Warning: the modal number to extract is larger than the system DOFs, only %d modes are available' % model.DOF)
        k = model.DOF
    omega2s, modes = sl.eigsh(K_, k, M_, sigma=0, which='LM')
    delta = modes / np.sum(modes, axis=0)
    model.is_solved = True
    model.mode_ = delta
    model.omega_ = np.sqrt(omega2s).reshape((k, 1))
Example #11
Source File: multi_layer_net.py From deep-learning-note with MIT License
def loss(self, x, t):
    """Compute the loss function.

    Parameters
    ----------
    x : input data
    t : teacher (ground-truth) labels

    Returns
    -------
    Value of the loss function
    """
    y = self.predict(x)

    weight_decay = 0
    for idx in range(1, self.hidden_layer_num + 2):
        W = self.params['W' + str(idx)]
        weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W ** 2)

    return self.last_layer.forward(y, t) + weight_decay
Example #12
Source File: main.py From tensorflow-DeepFM with MIT License
def _load_data():
    dfTrain = pd.read_csv(config.TRAIN_FILE)
    dfTest = pd.read_csv(config.TEST_FILE)

    def preprocess(df):
        cols = [c for c in df.columns if c not in ["id", "target"]]
        df["missing_feat"] = np.sum((df[cols] == -1).values, axis=1)
        df["ps_car_13_x_ps_reg_03"] = df["ps_car_13"] * df["ps_reg_03"]
        return df

    dfTrain = preprocess(dfTrain)
    dfTest = preprocess(dfTest)

    cols = [c for c in dfTrain.columns if c not in ["id", "target"]]
    cols = [c for c in cols if (not c in config.IGNORE_COLS)]

    X_train = dfTrain[cols].values
    y_train = dfTrain["target"].values
    X_test = dfTest[cols].values
    ids_test = dfTest["id"].values
    cat_features_indices = [i for i, c in enumerate(cols) if c in config.CATEGORICAL_COLS]

    return dfTrain, dfTest, X_train, y_train, X_test, ids_test, cat_features_indices
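
The missing_feat column above is np.sum() used as a per-row counter: comparing against the -1 sentinel yields a boolean array, and axis=1 counts the True values in each row. The same pattern in plain NumPy, on a small hypothetical feature matrix:

import numpy as np

features = np.array([[ 0.5, -1.0,  2.0],
                     [-1.0, -1.0,  3.0],
                     [ 1.0,  2.0,  0.0]])

missing_per_row = np.sum(features == -1, axis=1)  # array([1, 2, 0])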
Example #13
Source File: dataloader_m.py From models with MIT License
def _prepro_cpg(self, states, dists):
    """Preprocess the state and distance of neighboring CpG sites."""
    prepro_states = []
    prepro_dists = []
    for state, dist in zip(states, dists):
        nan = state == dat.CPG_NAN
        if np.any(nan):
            state[nan] = np.random.binomial(1, state[~nan].mean(), nan.sum())
            dist[nan] = self.cpg_max_dist
        dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
        prepro_states.append(np.expand_dims(state, 1))
        prepro_dists.append(np.expand_dims(dist, 1))
    prepro_states = np.concatenate(prepro_states, axis=1)
    prepro_dists = np.concatenate(prepro_dists, axis=1)
    if self.cpg_wlen:
        center = prepro_states.shape[2] // 2
        delta = self.cpg_wlen // 2
        tmp = slice(center - delta, center + delta)
        prepro_states = prepro_states[:, :, tmp]
        prepro_dists = prepro_dists[:, :, tmp]
    return (prepro_states, prepro_dists)
Example #14
Source File: Embed.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0
def _avg_embed(self, embed_dict, words_dict):
    """
    :param embed_dict:
    :param words_dict:
    """
    print("loading pre_train embedding by avg for out of vocabulary.")
    embeddings = np.zeros((int(self.words_count), int(self.dim)))
    inword_list = {}
    for word in words_dict:
        if word in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
            inword_list[words_dict[word]] = 1
            self.exact_count += 1
        elif word.lower() in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
            inword_list[words_dict[word]] = 1
            self.fuzzy_count += 1
        else:
            self.oov_count += 1
    sum_col = np.sum(embeddings, axis=0) / len(inword_list)  # avg
    for i in range(len(words_dict)):
        if i not in inword_list and i != self.padID:
            embeddings[i] = sum_col
    final_embed = torch.from_numpy(embeddings).float()
    return final_embed
Example #15
Source File: 8_kmeans_pca.py From deep-learning-note with MIT License
def compute_centroids(X, idx, k):
    m, n = X.shape
    centroids = np.zeros((k, n))

    for i in range(k):
        indices = np.where(idx == i)
        centroids[i,:] = (np.sum(X[indices,:], axis=1) / len(indices[0])).ravel()

    return centroids
Example #16
Source File: layers.py From deep-learning-note with MIT License
def backward(self, dout):
    FN, C, FH, FW = self.W.shape
    dout = dout.transpose(0, 2, 3, 1).reshape(-1, FN)

    self.db = np.sum(dout, axis=0)
    self.dW = np.dot(self.col.T, dout)
    self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)

    dcol = np.dot(dout, self.col_W.T)
    dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)

    return dx
Example #17
Source File: layers.py From deep-learning-note with MIT License
def __backward(self, dout):
    dbeta = dout.sum(axis=0)
    dgamma = np.sum(self.xn * dout, axis=0)
    dxn = self.gamma * dout
    dxc = dxn / self.std
    dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
    dvar = 0.5 * dstd / self.std
    dxc += (2.0 / self.batch_size) * self.xc * dvar
    dmu = np.sum(dxc, axis=0)
    dx = dxc - dmu / self.batch_size

    self.dgamma = dgamma
    self.dbeta = dbeta

    return dx
Example #18
Source File: layers.py From deep-learning-note with MIT License
def backward(self, dout):
    dx = np.dot(dout, self.W.T)
    self.dW = np.dot(self.x.T, dout)
    self.db = np.sum(dout, axis=0)

    dx = dx.reshape(*self.original_x_shape)  # restore the shape of the input data (for tensors)
    return dx
Example #19
Source File: functions.py From deep-learning-note with MIT License
def mean_squared_error(y, t):
    return 0.5 * np.sum((y - t) ** 2)
Example #20
Source File: functions.py From deep-learning-note with MIT License
def softmax(x):
    if x.ndim == 2:
        x = x.T
        x = x - np.max(x, axis=0)
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T

    x = x - np.max(x)  # guard against overflow
    return np.exp(x) / np.sum(np.exp(x))
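
Subtracting the maximum before exponentiating leaves softmax mathematically unchanged but keeps np.exp() from overflowing; np.sum() then normalizes so each row sums to 1. A quick standalone check of the stabilized computation on inputs that would overflow naively:

import numpy as np

x = np.array([[1000., 1001., 1002.]])  # naive np.exp(x) would overflow
shifted = x - np.max(x, axis=1, keepdims=True)
y = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
# y -> array([[0.0900, 0.2447, 0.6652]])
np.sum(y, axis=1)                      # array([1.])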
Example #21
Source File: 6_nn_basis.py From deep-learning-note with MIT License
def softmax(a):
    c = np.max(a)
    exp_a = np.exp(a - c)  # guard against overflow
    sum_exp_a = np.sum(exp_a)
    y = exp_a / sum_exp_a
    return y
Example #22
Source File: 9_two_layer_net_naive.py From deep-learning-note with MIT License
def accuracy(self, x, t):
    y = self.predict(x)
    y = np.argmax(y, axis=1)
    t = np.argmax(t, axis=1)

    accuracy = np.sum(y == t) / float(x.shape[0])
    return accuracy
Example #23
Source File: files.py From neuropythy with GNU Affero General Public License v3.0
def cifti_split(cii, label=('lh', 'rh', 'rest'), subject=None, hemi=None, null=np.nan):
    '''
    cifti_split(cii, label) yields the rows or columns of the given cifti file that correspond
      to the given label (see below). cifti_split(cii) is equivalent to
      cifti_split(cii, ('lh', 'rh', 'rest')).

    The label argument may be any of the following:
      * a valid CIFTI label name such as 'CIFTI_STRUCTURE_CEREBELLUM' or
        'CIFTI_STRUCTURE_CORTEX_LEFT';
      * an abbreviated name such as 'cerebellum' for 'CIFTI_STRUCTURE_CEREBELLUM';
      * the abbreviations 'lh' and 'rh' which stand for 'CIFTI_STRUCTURE_CORTEX_LEFT' and
        'CIFTI_STRUCTURE_CORTEX_RIGHT';
      * the special keyword 'rest', which represents all the rows/columns not collected by any
        other instruction ('rest', by itself, results in the whole matrix being returned); or
      * a tuple of the above, indicating that each of the items listed should be returned
        sequentially in a tuple.

    The following optional arguments may be given:
      * subject (default: None) may specify the subject
      * hemi (default: None) can specify the hemisphere object
    '''
    dat = np.asanyarray(cii.dataobj if is_image(cii) else cii)
    n = dat.shape[-1]
    atlas = cifti_split._size_data.get(n, None)
    if atlas is None: raise ValueError('cannot split cifti with size %d' % n)
    if atlas not in cifti_split._atlas_cache:
        patt = os.path.join('data', 'fs_LR', '%s.atlasroi.%dk_fs_LR.shape.gii')
        lgii = nib.load(os.path.join(library_path(), patt % ('lh', atlas)))
        rgii = nib.load(os.path.join(library_path(), patt % ('rh', atlas)))
        cifti_split._atlas_cache[atlas] = tuple(
            [pimms.imm_array(gii.darrays[0].data.astype('bool')) for gii in (lgii, rgii)])
    (lroi, rroi) = cifti_split._atlas_cache[atlas]
    (ln, lN) = (np.sum(lroi), len(lroi))
    (rn, rN) = (np.sum(rroi), len(rroi))
    (ldat, rdat, sdat) = [np.full(dat.shape[:-1] + (k,), null) for k in [lN, rN, n - ln - rn]]
    ldat[..., lroi] = dat[..., :ln]
    rdat[..., rroi] = dat[..., ln:(ln+rn)]
    sdat[...] = dat[..., (ln+rn):]
    if ln + rn >= n: sdat = None
    return (ldat, rdat, sdat)
Example #24
Source File: 10_two_layer_net.py From deep-learning-note with MIT License
def accuracy(self, x, t):
    y = self.predict(x)
    y = np.argmax(y, axis=1)
    if t.ndim != 1:
        t = np.argmax(t, axis=1)

    accuracy = np.sum(y == t) / float(x.shape[0])
    return accuracy
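
This accuracy computation is the boolean-counting idiom again: np.sum(y == t) counts the positions where the predicted and true class indices agree. A standalone sketch with made-up label vectors:

import numpy as np

y_pred = np.array([2, 0, 1, 1, 2])  # predicted class indices
y_true = np.array([2, 0, 2, 1, 2])  # ground-truth class indices

accuracy = np.sum(y_pred == y_true) / float(y_pred.shape[0])  # 0.8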
Example #25
Source File: util.py From neuropythy with GNU Affero General Public License v3.0
def normalize(u):
    '''
    normalize(u) yields a vector with the same direction as u but unit length, or, if u has
      zero length, yields u.
    '''
    u = np.asarray(u)
    unorm = np.sqrt(np.sum(u**2, axis=0))
    z = np.isclose(unorm, 0)
    c = np.logical_not(z) / (unorm + z)
    return u * c
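
Because of axis=0, this normalizes a whole batch of column vectors at once: np.sum(u**2, axis=0) produces one squared norm per column, and the np.isclose() guard leaves zero-length columns untouched rather than dividing by zero. A small sketch of the same arithmetic:

import numpy as np

u = np.array([[3., 0.],
              [4., 0.]])               # two column vectors: (3, 4) and (0, 0)
unorm = np.sqrt(np.sum(u**2, axis=0))  # array([5., 0.])
z = np.isclose(unorm, 0)               # array([False,  True])
c = np.logical_not(z) / (unorm + z)    # array([0.2, 0. ]) -- safe reciprocal
u * c                                  # columns become (0.6, 0.8) and (0, 0)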
Example #26
Source File: kde.py From svviz with MIT License
def evaluate(self, points):
    points = atleast_2d(points)

    d, m = points.shape
    if d != self.d:
        if d == 1 and m == self.d:
            # points was passed in as a row vector
            points = reshape(points, (self.d, 1))
            m = 1
        else:
            msg = "points have dimension %s, dataset has dimension %s" % (d, self.d)
            raise ValueError(msg)

    # np.float was removed in modern NumPy; the builtin float is equivalent here
    result = zeros((m,), dtype=float)

    if m >= self.n:
        # there are more points than data, so loop over data
        for i in range(self.n):
            diff = self.dataset[:, i, newaxis] - points
            tdiff = dot(self.inv_cov, diff)
            energy = sum(diff * tdiff, axis=0) / 2.0
            result = result + exp(-energy)
    else:
        # loop over points
        for i in range(m):
            diff = self.dataset - points[:, i, newaxis]
            tdiff = dot(self.inv_cov, diff)
            energy = sum(diff * tdiff, axis=0) / 2.0
            result[i] = sum(exp(-energy), axis=0)

    result = result / self._norm_factor

    return result
Example #27
Source File: 7_gradient.py From deep-learning-note with MIT License
def mean_squared_error(y, t):
    return 0.5 * np.sum((y-t)**2)

# Cross-entropy error
# Supports both single samples and batches
Example #28
Source File: 3_perceptron.py From deep-learning-note with MIT License
def OR(x1, x2):
    x = np.array([x1, x2])
    w = np.array([0.5, 0.5])
    b = -0.2
    tmp = np.sum(w * x) + b
    if tmp <= 0:
        return 0
    return 1

# XOR gate
Example #29
Source File: 3_perceptron.py From deep-learning-note with MIT License
def NAND(x1, x2):
    x = np.array([x1, x2])
    w = np.array([-0.5, -0.5])
    b = -0.7
    tmp = np.sum(w * x) + b
    if tmp <= 0:
        return 0
    return 1

# OR gate
Example #30
Source File: multi_layer_net_extend.py From deep-learning-note with MIT License
def accuracy(self, X, T):
    Y = self.predict(X, train_flg=False)
    Y = np.argmax(Y, axis=1)
    if T.ndim != 1:
        T = np.argmax(T, axis=1)

    accuracy = np.sum(Y == T) / float(X.shape[0])
    return accuracy