Python autograd.numpy.prod() Examples
The following are 30 code examples of autograd.numpy.prod(), drawn from open-source projects. Each example notes its original project, source file, and license. You may also want to check out all other available functions and classes of the module autograd.numpy.
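Before the project examples, here is a minimal standalone sketch (illustrative, not taken from any project below; the name `product` is ours) of why one reaches for autograd.numpy.prod() rather than plain numpy.prod: the autograd wrapper is differentiable, so autograd.grad can propagate gradients through the product.

import autograd.numpy as np  # thinly wrapped numpy
from autograd import grad

def product(x):
    return np.prod(x)

# For a product, d/dx_i prod(x) = prod(x) / x_i.
x = np.array([2.0, 3.0, 4.0])
print(grad(product)(x))  # [12.  8.  6.]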
Example #1
Source File: ar.py From autohmm with BSD 2-Clause "Simplified" License
def _do_optim(self, p, optim_x0, gn, data, entries='all'):
    optim_bounds = [self.wrt_bounds[p] for k in
                    range(np.prod(self.wrt_dims[p]))]
    result = minimize(fun=self._optim_wrap, jac=True,
                      x0=np.array(optim_x0).reshape(-1),
                      args=(p,
                            {'wrt': p,
                             'p': self.precision_,
                             'm': self.mu_,
                             'a': self.alpha_,
                             'xn': data['obs'],
                             'xln': data['lagged'],
                             'gn': gn,  # post. uni. concat.
                             'entries': entries}),
                      bounds=optim_bounds,
                      method='TNC')
    new_value = result.x.reshape(self.wrt_dims[p])
    return new_value
Example #2
Source File: g.py From pymoo with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    l = []
    for j in range(self.n_var):
        l.append((j + 1) * x[:, j] ** 2)
    sum_jx = anp.sum(anp.column_stack(l), axis=1)

    a = anp.sum(anp.cos(x) ** 4, axis=1)
    b = 2 * anp.prod(anp.cos(x) ** 2, axis=1)
    c = (anp.sqrt(sum_jx)).flatten()
    c = c + (c == 0) * 1e-20

    f = -anp.absolute((a - b) / c)

    # Constraints
    g1 = -anp.prod(x, 1) + 0.75
    g2 = anp.sum(x, axis=1) - 7.5 * self.n_var

    out["F"] = f
    out["G"] = anp.column_stack([g1, g2])
Example #3
Source File: observation.py From scarlet with MIT License
def log_norm(self):
    try:
        return self._log_norm
    except AttributeError:
        if self.frame != self.model_frame:
            images_ = self.images[self.slices_for_images]
            weights_ = self.weights[self.slices_for_images]
        else:
            images_ = self.images
            weights_ = self.weights

        # normalization of the single-pixel likelihood:
        # 1 / [(2pi)^1/2 (sigma^2)^1/2]
        # with inverse variance weights: sigma^2 = 1/weight
        # full likelihood is sum over all data samples: pixel in images
        # NOTE: this assumes that all pixels are used in likelihood!
        log_sigma = np.zeros(weights_.shape, dtype=self.weights.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])
        self._log_norm = (
            np.prod(images_.shape) / 2 * np.log(2 * np.pi)
            + np.sum(log_sigma) / 2
        )
    return self._log_norm
Example #4
Source File: observation.py From scarlet with MIT License
def get_loss(self, model):
    """Computes the loss/fidelity of a given model w.r.t. the observation

    Parameters
    ----------
    model: array
        A model from `Blend`

    Returns
    -------
    loss: float
        Loss of the model
    """
    model_ = self.render(model)
    images_ = self.images
    weights_ = self.weights

    # properly normalized likelihood
    log_sigma = np.zeros(weights_.shape, dtype=weights_.dtype)
    cuts = weights_ > 0
    log_sigma[cuts] = np.log(1 / weights_[cuts])
    log_norm = (
        np.prod(images_.shape) / 2 * np.log(2 * np.pi)
        + np.sum(log_sigma) / 2
    )

    return log_norm + 0.5 * np.sum(weights_ * (model_ - images_) ** 2)
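For reference, the quantity these two scarlet methods compute (as the code's own comments indicate) is the negative log-likelihood of a Gaussian model with inverse-variance weights w_i = 1/sigma_i^2 over N = np.prod(images_.shape) pixels:

    -log L = (N/2) * log(2*pi) + (1/2) * sum_i log(sigma_i^2) + (1/2) * sum_i w_i * (model_i - image_i)^2

The first two terms are the normalization (log_norm above); the last is the weighted squared residual returned by get_loss.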
Example #5
Source File: g.py From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    l = []
    for j in range(self.n_var):
        l.append((j + 1) * x[:, j] ** 2)
    sum_jx = anp.sum(anp.column_stack(l), axis=1)

    a = anp.sum(anp.cos(x) ** 4, axis=1)
    b = 2 * anp.prod(anp.cos(x) ** 2, axis=1)
    c = (anp.sqrt(sum_jx)).flatten()
    c = c + (c == 0) * 1e-20

    f = -anp.absolute((a - b) / c)

    # Constraints
    g1 = -anp.prod(x, 1) + 0.75
    g2 = anp.sum(x, axis=1) - 7.5 * self.n_var

    out["F"] = f
    out["G"] = anp.column_stack([g1, g2])
Example #6
Source File: dtlz.py From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]
    g = self.g1(X_M)

    f = []
    for i in range(0, self.n_obj):
        _f = 0.5 * (1 + g)
        _f *= anp.prod(X_[:, :X_.shape[1] - i], axis=1)
        if i > 0:
            _f *= 1 - X_[:, X_.shape[1] - i]
        f.append(_f)

    out["F"] = anp.column_stack(f)
Example #7
Source File: autoptim.py From autoptim with MIT License
def _convert_bounds(bounds, shapes):
    output_bounds = []
    for shape, bound in zip(shapes, bounds):
        # Check if the bound is already parsable by scipy.optimize
        b = bound[0]
        if isinstance(b, (list, tuple, np.ndarray)):
            output_bounds += bound
        else:
            output_bounds += [bound, ] * np.prod(shape)
    return output_bounds
Example #8
Source File: autoptim.py From autoptim with MIT License
def _split(x, shapes):
    x_split = np.split(x, np.cumsum([np.prod(shape) for shape in shapes[:-1]]))
    optim_vars = [var.reshape(*shape)
                  for (var, shape) in zip(x_split, shapes)]
    return optim_vars
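For intuition, a hypothetical call (illustrative values, not from the autoptim source) showing how _split uses np.prod to turn shapes into split points: with shapes = [(2, 3), (4,)], the cumulative element counts give [6], so a flat 10-vector is cut into a length-6 and a length-4 piece, then reshaped.

import numpy as np

shapes = [(2, 3), (4,)]
x = np.arange(10.0)
x_split = np.split(x, np.cumsum([np.prod(shape) for shape in shapes[:-1]]))
optim_vars = [var.reshape(*shape) for (var, shape) in zip(x_split, shapes)]
# optim_vars[0].shape == (2, 3); optim_vars[1].shape == (4,)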
Example #9
Source File: data.py From kernel-gof with MIT License
def cross_sine_intensity(self, X):
    intensity = self.lamb_bar * np.prod(np.sin(self.w * X * np.pi), 1)
    return intensity
Example #10
Source File: data.py From kernel-gof with MIT License
def cross_sine_intensity(self, X):
    intensity = self.lamb_bar * np.prod(np.sin(self.w * X * np.pi), 1)
    return intensity
Example #11
Source File: data.py From kernel-gof with MIT License
def lamb_sin(self, X):
    return np.prod(np.sin(self.w * np.pi * X), 1) * 15
Example #12
Source File: data.py From kernel-gof with MIT License
def sample(self, n, seed=872):
    """
    Rejection sampling.
    """
    d = len(self.freqs)
    sigma2 = self.sigma2
    freqs = self.freqs
    with util.NumpySeedContext(seed=seed):
        # rejection sampling
        sam = np.zeros((n, d))
        # sample block_size*d at a time.
        block_size = 500
        from_ind = 0
        while from_ind < n:
            # The proposal q is N(0, sigma2*I)
            X = np.random.randn(block_size, d) * np.sqrt(sigma2)
            q_un = np.exp(old_div(-np.sum(X ** 2, 1), (2.0 * sigma2)))
            # unnormalized density p
            p_un = q_un * (1 + np.prod(np.cos(X * freqs), 1))
            c = 2.0
            I = stats.uniform.rvs(size=block_size) < old_div(p_un, (c * q_un))

            # accept
            accepted_count = np.sum(I)
            to_take = min(n - from_ind, accepted_count)
            end_ind = from_ind + to_take

            AX = X[I, :]
            X_take = AX[:to_take, :]
            sam[from_ind:end_ind, :] = X_take
            from_ind = end_ind
    return Data(sam)
Example #13
Source File: density.py From kernel-gof with MIT License
def lamb_sin(self, X):
    return np.prod(np.sin(self.w * np.pi * X), 1)
Example #14
Source File: griewank.py From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    out["F"] = 1 + 1 / 4000 * np.sum(np.power(x, 2), axis=1) \
               - np.prod(np.cos(x / np.sqrt(np.arange(1, x.shape[1] + 1))), axis=1)
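The expression above is the standard Griewank benchmark function, f(x) = 1 + (1/4000) * sum_i x_i^2 - prod_i cos(x_i / sqrt(i)) with i running from 1 to n; np.prod supplies the cosine product term, and np.arange(1, x.shape[1] + 1) provides the per-dimension indices i.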
Example #15
Source File: dtlz.py From pymop with Apache License 2.0
def obj_func(self, X_, g, alpha=1):
    f = []

    for i in range(0, self.n_obj):
        _f = (1 + g)
        _f *= anp.prod(anp.cos(anp.power(X_[:, :X_.shape[1] - i], alpha) * anp.pi / 2.0), axis=1)
        if i > 0:
            _f *= anp.sin(anp.power(X_[:, X_.shape[1] - i], alpha) * anp.pi / 2.0)
        f.append(_f)

    f = anp.column_stack(f)
    return f
Example #16
Source File: test_numpy.py From autograd with MIT License
def test_prod_2():
    def fun(x):
        return np.prod(x, axis=0)
    mat = npr.randn(2, 3) ** 2 + 0.1
    check_grads(fun)(mat)
Example #17
Source File: g.py From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    f = -anp.sqrt(self.n_var) ** self.n_var * anp.prod(x, axis=1)

    # Constraints
    g = anp.absolute(anp.sum(x ** 2, axis=1) - 1) - 1e-4

    out["F"] = f
    out["G"] = g
Example #18
Source File: tm.py From autohmm with BSD 2-Clause "Simplified" License
def _do_mstep_grad(self, gn, data):
    wrt = [str(p) for p in self.wrt if str(p) in self.params]
    for update_idx in range(self.n_iter_update):
        for p in wrt:
            if p == 'm':
                optim_x0 = self.mu_
                wrt_arg = 0
            elif p == 'p':
                optim_x0 = self.precision_
                wrt_arg = 1
            else:
                raise ValueError('unknown parameter')

            optim_bounds = [self.wrt_bounds[p] for k in
                            range(np.prod(self.wrt_dims[p]))]
            result = minimize(fun=self._optim_wrap, jac=True,
                              x0=np.array(optim_x0).reshape(-1),
                              args=(p,
                                    {'wrt': wrt_arg,
                                     'p': self.precision_,
                                     'm': self.mu_,
                                     'xn': data['obs'],
                                     'gn': gn  # post. uni. concat.
                                    }),
                              bounds=optim_bounds,
                              method='TNC')
            newv = result.x.reshape(self.wrt_dims[p])
            if p == 'm':
                self.mu_ = newv
            elif p == 'p':
                # ensure that precision matrix is symmetric
                for u in range(self.n_unique):
                    newv[u, :, :] = (newv[u, :, :] + newv[u, :, :].T) / 2.0
                self.precision_ = newv
            else:
                raise ValueError('unknown parameter')
Example #19
Source File: convnet.py From MLAlgorithms with MIT License
def shape(self, x_shape):
    return x_shape[0], np.prod(x_shape[1:])
Example #20
Source File: test_numpy.py From autograd with MIT License
def test_prod_4():
    def fun(x):
        return np.prod(x)
    mat = npr.randn(7) ** 2 + 0.1
    check_grads(fun)(mat)
Example #21
Source File: test_numpy.py From autograd with MIT License
def test_prod_3():
    def fun(x):
        return np.prod(x, axis=0, keepdims=True)
    mat = npr.randn(2, 3) ** 2 + 0.1
    check_grads(fun)(mat)
Example #22
Source File: dtlz.py From pymoo with Apache License 2.0
def obj_func(self, X_, g, alpha=1):
    f = []

    for i in range(0, self.n_obj):
        _f = (1 + g)
        _f *= anp.prod(anp.cos(anp.power(X_[:, :X_.shape[1] - i], alpha) * anp.pi / 2.0), axis=1)
        if i > 0:
            _f *= anp.sin(anp.power(X_[:, X_.shape[1] - i], alpha) * anp.pi / 2.0)
        f.append(_f)

    f = anp.column_stack(f)
    return f
Example #23
Source File: test_numpy.py From autograd with MIT License
def test_prod_1():
    def fun(x):
        return np.prod(x)
    # Gradient unstable when zeros are present.
    mat = npr.randn(2, 3) ** 2 / 10.0 + 0.1
    check_grads(fun)(mat)
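The comment in test_prod_1 reflects the product rule: each partial derivative is d/dx_i prod(x) = prod_{j != i} x_j = prod(x) / x_i, so entries at or near zero make the gradient, and the finite-difference comparison inside check_grads, ill-conditioned. That is why the test shifts the squared random matrix away from zero.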
Example #24
Source File: test_jacobian.py From autograd with MIT License
def test_jacobian_against_stacked_grads():
    scalar_funs = [
        lambda x: np.sum(x ** 3),
        lambda x: np.prod(np.sin(x) + np.sin(x)),
        lambda x: grad(lambda y: np.exp(y) * np.tanh(x[0]))(x[1])
    ]

    vector_fun = lambda x: np.array([f(x) for f in scalar_funs])

    x = npr.randn(5)
    jac = jacobian(vector_fun)(x)
    grads = [grad(f)(x) for f in scalar_funs]

    assert np.allclose(jac, np.vstack(grads))
Example #25
Source File: data.py From autograd with MIT License
def load_mnist():
    partial_flatten = lambda x: np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, k: np.array(x[:, None] == np.arange(k)[None, :], dtype=int)
    train_images, train_labels, test_images, test_labels = data_mnist.mnist()
    train_images = partial_flatten(train_images) / 255.0
    test_images = partial_flatten(test_images) / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]
    return N_data, train_images, train_labels, test_images, test_labels
Example #26
Source File: convnet.py From autograd with MIT License
def forward_pass(self, inputs, param_vector):
    params = self.parser.get(param_vector, 'params')
    biases = self.parser.get(param_vector, 'biases')
    if inputs.ndim > 2:
        inputs = inputs.reshape((inputs.shape[0], np.prod(inputs.shape[1:])))
    return self.nonlinearity(np.dot(inputs[:, :], params) + biases)
Example #27
Source File: convnet.py From autograd with MIT License
def build_weights_dict(self, input_shape):
    # Input shape is anything (all flattened)
    input_size = np.prod(input_shape, dtype=int)
    self.parser = WeightsParser()
    self.parser.add_weights('params', (input_size, self.size))
    self.parser.add_weights('biases', (self.size,))
    return self.parser.N, (self.size,)
Example #28
Source File: convnet.py From autograd with MIT License
def add_weights(self, name, shape):
    start = self.N
    self.N += np.prod(shape)
    self.idxs_and_shapes[name] = (slice(start, self.N), shape)
Example #29
Source File: model.py From tree-regularization-public with MIT License
def add_shape(self, name, shape):
    start = self.num_weights
    self.num_weights += np.prod(shape)
    self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)
Example #30
Source File: einsum2.py From momi2 with GNU General Public License v3.0
def _reshape(in_arr, in_sublist, *out_sublists):
    assert len(out_sublists) == 3
    old_sublist = in_sublist
    in_sublist = sum(out_sublists, [])
    in_arr = _transpose(in_arr, old_sublist, in_sublist)

    # in_arr.shape breaks in autograd if it has no dimension
    if in_sublist:
        shapes = {s: i for i, s in zip(in_arr.shape, in_sublist)}
    else:
        shapes = {}

    return np.reshape(in_arr, [np.prod([shapes[s] for s in out_subs], dtype=int)
                               for out_subs in out_sublists])