Python autograd.numpy.log() Examples
The following are 30 code examples of autograd.numpy.log().
You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
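All of the examples below follow the same pattern: import autograd.numpy as np (aliased anp or grad_np in some projects), build a scalar objective that uses np.log(), and let autograd differentiate it. A minimal illustration of that pattern (not taken from any of the projects below; the toy exponential likelihood is only for demonstration):

import autograd.numpy as np
from autograd import grad

def neg_log_likelihood(mu, x):
    # negative log-density of data x under an Exponential(rate=mu) distribution
    return -np.sum(np.log(mu) - mu * x)

x = np.array([0.3, 1.2, 0.7])
dnll_dmu = grad(neg_log_likelihood)(2.0, x)   # derivative with respect to mu
print(neg_log_likelihood(2.0, x), dnll_dmu)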
Example #1
Source File: standard_models.py From pyhawkes with MIT License | 6 votes |
def objective(self, w):
    obj = 0
    N = float(sum([np.sum(d[1]) for d in self.data_list]))
    for F, S in self.data_list:
        psi = np.dot(F, w)
        lam = self.link(psi)
        obj -= np.sum(S * np.log(lam) - lam * self.dt) / N
        # assert np.isfinite(ll)

    # Add penalties
    obj += (0.5 * np.sum(w[1:]**2) / self.sigma**2) / N
    obj += np.sum(np.abs(w[1:]) * self.lmbda) / N
    # assert np.isfinite(obj)

    return obj
Example #2
Source File: convnet.py From autograd with MIT License | 6 votes |
def make_nn_funs(input_shape, layer_specs, L2_reg):
    parser = WeightsParser()
    cur_shape = input_shape
    for layer in layer_specs:
        N_weights, cur_shape = layer.build_weights_dict(cur_shape)
        parser.add_weights(layer, (N_weights,))

    def predictions(W_vect, inputs):
        """Outputs normalized log-probabilities.
        shape of inputs : [data, color, y, x]"""
        cur_units = inputs
        for layer in layer_specs:
            cur_weights = parser.get(W_vect, layer)
            cur_units = layer.forward_pass(cur_units, cur_weights)
        return cur_units

    def loss(W_vect, X, T):
        log_prior = -L2_reg * np.dot(W_vect, W_vect)
        log_lik = np.sum(predictions(W_vect, X) * T)
        return - log_prior - log_lik

    def frac_err(W_vect, X, T):
        # (uses the local `predictions` function defined above)
        return np.mean(np.argmax(T, axis=1) != np.argmax(predictions(W_vect, X), axis=1))

    return parser.N, predictions, loss, frac_err
Example #3
Source File: bench_rnn.py From autograd with MIT License | 6 votes |
def setup(self):
    self.batch_size = 16
    self.dtype = "float32"
    self.D = 2**10
    self.x = 0.01 * np.random.randn(self.batch_size, self.D).astype(self.dtype)
    self.W1 = 0.01 * np.random.randn(self.D, self.D).astype(self.dtype)
    self.b1 = 0.01 * np.random.randn(self.D).astype(self.dtype)
    self.Wout = 0.01 * np.random.randn(self.D, 1).astype(self.dtype)
    self.bout = 0.01 * np.random.randn(1).astype(self.dtype)
    self.l = (np.random.rand(self.batch_size, 1) > 0.5).astype(self.dtype)
    self.n = 50

    def autograd_rnn(params, x, label, n):
        W, b, Wout, bout = params
        h1 = x
        for i in range(n):
            h1 = np.tanh(np.dot(h1, W) + b)
        logit = np.dot(h1, Wout) + bout
        loss = -np.sum(label * logit - (logit + np.log(1 + np.exp(-logit))))
        return loss

    self.fn = autograd_rnn
    self.grad_fn = grad(self.fn)
Example #4
Source File: likelihood.py From momi2 with GNU General Public License v3.0 | 6 votes |
def _composite_log_likelihood(data, demo, mut_rate=None, truncate_probs=0.0,
                              vector=False, p_missing=None,
                              use_pairwise_diffs=False, **kwargs):
    try:
        sfs = data.sfs
    except AttributeError:
        sfs = data

    sfs_probs = np.maximum(expected_sfs(demo, sfs.configs, normalized=True, **kwargs),
                           truncate_probs)
    log_lik = sfs._integrate_sfs(np.log(sfs_probs), vector=vector)

    # add on log likelihood of poisson distribution for total number of SNPs
    if mut_rate is not None:
        log_lik = log_lik + _mut_factor(sfs, demo, mut_rate, vector,
                                        p_missing, use_pairwise_diffs)

    if not vector:
        log_lik = np.squeeze(log_lik)
    return log_lik
Example #5
Source File: sfs.py From momi2 with GNU General Public License v3.0 | 6 votes |
def _entropy(self):
    counts = self._total_freqs
    n_snps = float(self.n_snps())
    p = counts / n_snps
    # return np.sum(p * np.log(p))
    ret = np.sum(p * np.log(p))

    # correct for missing data
    sampled_n = np.sum(self.configs.value, axis=2)
    sampled_n_counts = co.Counter()
    assert len(counts) == len(sampled_n)
    for c, n in zip(counts, sampled_n):
        n = tuple(n)
        sampled_n_counts[n] += c
    sampled_n_counts = np.array(list(sampled_n_counts.values()), dtype=float)

    ret = ret + np.sum(sampled_n_counts / n_snps *
                       np.log(n_snps / sampled_n_counts))
    assert not np.isnan(ret)
    return ret
Example #6
Source File: demo_plotter.py From momi2 with GNU General Public License v3.0 | 6 votes |
def goto_time(self, t, add_time=True):
    # if exponentially growing, add extra time points whenever
    # the population size doubles
    if self.curr_g != 0 and t < float('inf'):
        halflife = np.abs(np.log(.5) / self.curr_g)
        add_t = self.curr_t + halflife
        while add_t < t:
            self._push_time(add_t)
            add_t += halflife

    while self.time_stack and self.time_stack[0] < t:
        self.step_time(hq.heappop(self.time_stack))
    self.step_time(t, add=False)
    if add_time:
        # put t on queue to be added when processing next event
        # (allows further events to change population size before plotting)
        self._push_time(t)
Example #7
Source File: hmm_em.py From autograd with MIT License | 6 votes |
def EM(init_params, data, callback=None):
    def EM_update(params):
        natural_params = list(map(np.log, params))
        loglike, E_stats = vgrad(log_partition_function)(natural_params, data)  # E step
        if callback:
            callback(loglike, params)
        return list(map(normalize, E_stats))  # M step

    def fixed_point(f, x0):
        x1 = f(x0)
        while different(x0, x1):
            x0, x1 = x1, f(x1)
        return x1

    def different(params1, params2):
        allclose = partial(np.allclose, atol=1e-3, rtol=1e-3)
        return not all(map(allclose, params1, params2))

    return fixed_point(EM_update, init_params)
Example #8
Source File: black_box_svi.py From autograd with MIT License | 6 votes |
def black_box_variational_inference(logprob, D, num_samples):
    """Implements http://arxiv.org/abs/1401.0118, and uses the
    local reparameterization trick from http://arxiv.org/abs/1506.02557"""

    def unpack_params(params):
        # Variational dist is a diagonal Gaussian.
        mean, log_std = params[:D], params[D:]
        return mean, log_std

    def gaussian_entropy(log_std):
        return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)

    rs = npr.RandomState(0)
    def variational_objective(params, t):
        """Provides a stochastic estimate of the variational lower bound."""
        mean, log_std = unpack_params(params)
        samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
        lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples, t))
        return -lower_bound

    gradient = grad(variational_objective)

    return variational_objective, gradient, unpack_params
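A minimal usage sketch (not part of the original example): it assumes autograd.numpy as np, autograd.numpy.random as npr, and autograd.grad are imported as in the source file, and fits the diagonal-Gaussian variational distribution to a standard-normal target with plain gradient descent. The target log-density, step size, and iteration count below are illustrative choices.

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad

def logprob(samples, t):
    # hypothetical target: unnormalized standard normal, one value per sample row
    return -0.5 * np.sum(samples**2, axis=1)

D = 2
objective, gradient, unpack_params = black_box_variational_inference(logprob, D, num_samples=200)

params = np.concatenate([np.ones(D), -2.0 * np.ones(D)])  # [mean, log_std]
for t in range(500):
    params = params - 0.05 * gradient(params, t)          # plain SGD step

mean, log_std = unpack_params(params)
print(mean, np.exp(log_std))   # should approach mean ~ 0 and std ~ 1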
Example #9
Source File: model.py From tree-regularization-public with MIT License | 6 votes |
def softplus(x):
    """ Numerically stable transform from real line to positive reals
    Returns np.log(1.0 + np.exp(x))
    Autograd friendly and fully vectorized

    @param x: array of values in (-\infty, +\infty)
    @return ans : array of values in (0, +\infty), same size as x
    """
    if not isinstance(x, float):
        mask1 = x > 0
        mask0 = np.logical_not(mask1)
        out = np.zeros_like(x)
        out[mask0] = np.log1p(np.exp(x[mask0]))
        out[mask1] = x[mask1] + np.log1p(np.exp(-x[mask1]))
        return out
    if x > 0:
        return x + np.log1p(np.exp(-x))
    else:
        return np.log1p(np.exp(x))
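A short check, added here for illustration rather than taken from the source: for moderate inputs the stable form agrees with the naive np.log(1.0 + np.exp(x)), while for large positive x the naive form overflows.

import numpy as np

x = np.array([-30.0, -1.0, 0.0, 1.0, 30.0])
stable = softplus(x)
naive = np.log(1.0 + np.exp(x))       # fine for moderate x
print(np.allclose(stable, naive))     # True

print(softplus(1000.0))               # 1000.0; np.log(1.0 + np.exp(1000.0)) would overflow to inf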
Example #10
Source File: methods.py From tf-quant-finance with Apache License 2.0 | 6 votes |
def _build_errors_df(name_errors, label):
    """Helper to build errors DataFrame."""
    series = []
    percentiles = np.linspace(0, 100, 21)
    index = percentiles / 100
    for name, errors in name_errors:
        series.append(pd.Series(
            np.nanpercentile(errors, q=percentiles),
            index=index, name=name))
    df = pd.concat(series, axis=1)
    df.columns.name = 'derivative'
    df.index.name = 'quantile'
    df = df.stack().rename('error').reset_index()
    with np.errstate(divide='ignore'):
        df['log(error)'] = np.log(df['error'])
    if label is not None:
        df['label'] = label
    return df
Example #11
Source File: demo_utils.py From momi2 with GNU General Public License v3.0 | 6 votes |
def simple_nea_admixture_demo(N_chb_bottom, N_chb_top, pulse_t, pulse_p,
                              ej_chb, ej_yri, sampled_n=(14, 10)):
    ej_chb = pulse_t + ej_chb
    ej_yri = ej_chb + ej_yri

    G_chb = -np.log(N_chb_top / N_chb_bottom) / ej_chb

    model = momi.DemographicModel(1., .25)
    model.add_leaf("yri")
    model.add_leaf("chb")
    model.set_size("chb", 0., N=N_chb_bottom, g=G_chb)
    model.move_lineages("chb", "nea", t=pulse_t, p=pulse_p)
    model.move_lineages("chb", "yri", t=ej_chb)
    model.move_lineages("yri", "nea", t=ej_yri)
    return model

    #events = [('-en', 0., 'chb', N_chb_bottom),
    #          ('-eg', 0, 'chb', G_chb),
    #          ('-ep', pulse_t, 'chb', 'nea', pulse_p),
    #          ('-ej', ej_chb, 'chb', 'yri'),
    #          ('-ej', ej_yri, 'yri', 'nea'),
    #          ]
    #return make_demo_hist(events, ('yri', 'chb'), sampled_n)
    ## return make_demography(events, ('yri','chb'), sampled_n)
Example #12
Source File: beta_geo_beta_binom_fitter.py From lifetimes with MIT License | 6 votes |
def _loglikelihood(params, x, tx, T):
    """Log likelihood for optimizer."""
    warnings.simplefilter(action="ignore", category=FutureWarning)

    alpha, beta, gamma, delta = params

    betaln_ab = betaln(alpha, beta)
    betaln_gd = betaln(gamma, delta)

    A = (betaln(alpha + x, beta + T - x) - betaln_ab
         + betaln(gamma, delta + T) - betaln_gd)

    B = 1e-15 * np.ones_like(T)
    recency_T = T - tx - 1

    for j in np.arange(recency_T.max() + 1):
        ix = recency_T >= j
        B = B + ix * betaf(alpha + x, beta + tx - x + j) * betaf(gamma + 1, delta + tx + j)

    B = log(B) - betaln_gd - betaln_ab
    return logaddexp(A, B)
Example #13
Source File: ctp.py From pymoo with Apache License 2.0 | 6 votes |
def __init__(self, n_var=2, n_constr=2, **kwargs):
    super().__init__(n_var, n_constr, **kwargs)

    a, b = anp.zeros(n_constr + 1), anp.zeros(n_constr + 1)
    a[0], b[0] = 1, 1
    delta = 1 / (n_constr + 1)
    alpha = delta

    for j in range(n_constr):
        beta = a[j] * anp.exp(-b[j] * alpha)
        a[j + 1] = (a[j] + beta) / 2
        b[j + 1] = - 1 / alpha * anp.log(beta / a[j + 1])
        alpha += delta

    self.a = a[1:]
    self.b = b[1:]
Example #14
Source File: eucl_simple_model.py From hyperbolic_cones with Apache License 2.0 | 6 votes |
def _compute_loss(self):
    """Compute and store loss value for the given batch of examples."""
    if self._loss_computed:
        return
    self._compute_distances()

    # NLL loss from the NIPS paper.
    exp_negative_distances = np.exp(-self.euclidean_dists)  # (1 + neg_size, batch_size)
    # Remove the value for the true edge (u,v) from the partition function
    Z = exp_negative_distances[1:].sum(axis=0)  # (batch_size)

    self.exp_negative_distances = exp_negative_distances  # (1 + neg_size, batch_size)
    self.Z = Z  # (batch_size)

    self.pos_loss = self.euclidean_dists[0].sum()
    self.neg_loss = np.log(self.Z).sum()
    self.loss = self.pos_loss + self.neg_loss  # scalar

    self._loss_computed = True
Example #15
Source File: poincare_model.py From hyperbolic_cones with Apache License 2.0 | 6 votes |
def _nll_loss_fn(poincare_dists):
    """
    Parameters
    ----------
    poincare_dists : numpy.array
        All distances d(u,v) and d(u,v'), where v' is negative. Shape (1 + negative_size).

    Returns
    ----------
    log-likelihood loss function from the NIPS paper, Eq (6).
    """
    exp_negative_distances = grad_np.exp(-poincare_dists)

    # Remove the value for the true edge (u,v) from the partition function
    # return -grad_np.log(exp_negative_distances[0] / (- exp_negative_distances[0] + exp_negative_distances.sum()))
    return poincare_dists[0] + grad_np.log(exp_negative_distances[1:].sum())
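The loss is written with autograd.numpy (imported as grad_np in this project) precisely so it can be differentiated with respect to the distances. A small illustrative sketch, not from the source, of obtaining that gradient with made-up distance values:

import autograd.numpy as grad_np
from autograd import grad

# one positive distance followed by three negative-sample distances (arbitrary values)
dists = grad_np.array([0.5, 2.0, 1.5, 3.0])

loss = _nll_loss_fn(dists)
dloss_ddists = grad(_nll_loss_fn)(dists)
# gradient is 1 for the positive entry; each negative entry gets minus its
# softmax weight under exp(-d) over the negatives
print(loss, dloss_ddists)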
Example #16
Source File: observation.py From scarlet with MIT License | 6 votes |
def get_loss(self, model):
    """Computes the loss/fidelity of a given model w.r.t. the observation

    Parameters
    ----------
    model: array
        A model from `Blend`

    Returns
    -------
    loss: float
        Loss of the model
    """
    model_ = self.render(model)
    images_ = self.images
    weights_ = self.weights

    # properly normalized likelihood
    log_sigma = np.zeros(weights_.shape, dtype=weights_.dtype)
    cuts = weights_ > 0
    log_sigma[cuts] = np.log(1 / weights_[cuts])
    log_norm = (
        np.prod(images_.shape) / 2 * np.log(2 * np.pi)
        + np.sum(log_sigma) / 2
    )

    return log_norm + 0.5 * np.sum(weights_ * (model_ - images_) ** 2)
Example #17
Source File: observation.py From scarlet with MIT License | 6 votes |
def log_norm(self):
    try:
        return self._log_norm
    except AttributeError:
        if self.frame != self.model_frame:
            images_ = self.images[self.slices_for_images]
            weights_ = self.weights[self.slices_for_images]
        else:
            images_ = self.images
            weights_ = self.weights

        # normalization of the single-pixel likelihood:
        # 1 / [(2pi)^1/2 (sigma^2)^1/2]
        # with inverse variance weights: sigma^2 = 1/weight
        # full likelihood is sum over all data samples: pixel in images
        # NOTE: this assumes that all pixels are used in likelihood!
        log_sigma = np.zeros(weights_.shape, dtype=self.weights.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])
        self._log_norm = (
            np.prod(images_.shape) / 2 * np.log(2 * np.pi)
            + np.sum(log_sigma) / 2
        )

    return self._log_norm
Example #18
Source File: density.py From kernel-gof with MIT License | 6 votes |
def multivariate_normal_density(mean, cov, X):
    """
    Exact density (not log density) of a multivariate Gaussian.
    mean: length-d array
    cov: a dxd covariance matrix
    X: n x d 2d-array
    """
    evals, evecs = np.linalg.eigh(cov)
    cov_half_inv = evecs.dot(np.diag(evals**(-0.5))).dot(evecs.T)
    # print(evals)

    half_evals = np.dot(X - mean, cov_half_inv)
    full_evals = np.sum(half_evals**2, 1)
    unden = np.exp(-0.5 * full_evals)

    Z = np.sqrt(np.linalg.det(2.0 * np.pi * cov))
    den = unden / Z
    assert len(den) == X.shape[0]
    return den
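A quick sanity check, added for illustration and not part of the source: the hand-rolled density should match scipy.stats.multivariate_normal.pdf on random inputs.

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
d = 3
mean = rng.randn(d)
A = rng.randn(d, d)
cov = A.dot(A.T) + d * np.eye(d)   # a random positive-definite covariance
X = rng.randn(10, d)

den = multivariate_normal_density(mean, cov, X)
ref = stats.multivariate_normal.pdf(X, mean=mean, cov=cov)
print(np.allclose(den, ref))       # True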
Example #19
Source File: density.py From kernel-gof with MIT License | 5 votes |
def log_normalized_den(self, X):
    pmix = self.pmix
    means = self.means
    variances = self.variances
    k, d = self.means.shape
    n = X.shape[0]

    den = np.zeros(n, dtype=float)
    for i in range(k):
        norm_den_i = IsoGaussianMixture.normal_density(means[i], variances[i], X)
        den = den + norm_den_i * pmix[i]
    return np.log(den)

#def grad_log(self, X):
#    """
#    Return an n x d numpy array of gradients.
#    """
#    pmix = self.pmix
#    means = self.means
#    variances = self.variances
#    k, d = self.means.shape
#
#    # exact density. length-n array
#    den = np.exp(self.log_den(X))
#    for i in range(k):
#        norm_den_i = IsoGaussianMixture.normal_density(means[i],
#            variances[i], X)
Example #20
Source File: density.py From kernel-gof with MIT License | 5 votes |
def grad_log(self, X):
    """
    Evaluate the gradients (with respect to the input) of the log density at
    each of the n points in X. This is the score function. Given an
    implementation of log_den(), this method will automatically work.

    Subclasses may override this if a more efficient implementation is
    available.

    X: n x d numpy array.

    Return an n x d numpy array of gradients.
    """
    g = autograd.elementwise_grad(self.log_den)
    G = g(X)
    return G
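To see what elementwise_grad buys here, a standalone sketch (illustrative, not from the source): for an unnormalized isotropic Gaussian log-density the score function is simply -X, and autograd recovers it row by row.

import autograd
import autograd.numpy as np

def log_den(X):
    # unnormalized standard-normal log density, one value per row of X
    return -0.5 * np.sum(X**2, axis=1)

X = np.random.randn(5, 3)
score = autograd.elementwise_grad(log_den)(X)
print(np.allclose(score, -X))   # True: the gradient of -0.5*||x||^2 is -x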
Example #21
Source File: density.py From kernel-gof with MIT License | 5 votes |
def log_den(self, X):
    """
    Evaluate this log of the unnormalized density on the n points in X.

    X: n x d numpy array

    Return a one-dimensional numpy array of length n.
    """
    raise NotImplementedError()
Example #22
Source File: density.py From kernel-gof with MIT License | 5 votes |
def log_normalized_den(self, X):
    """
    Evaluate the exact normalized log density. The difference to log_den()
    is that this method adds the normalizer. This method is not compulsory.
    Subclasses do not need to override.
    """
    raise NotImplementedError()
Example #23
Source File: define_gradient.py From autograd with MIT License | 5 votes |
def logsumexp(x):
    """Numerically stable log(sum(exp(x))), also defined in scipy.special"""
    max_x = np.max(x)
    return max_x + np.log(np.sum(np.exp(x - max_x)))

# Next, we write a function that specifies the gradient with a closure.
# The reason for the closure is so that the gradient can depend
# on both the input to the original function (x), and the output of the
# original function (ans).
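As the comment above notes, the full example goes on to register a hand-written gradient; for the function exactly as shown, autograd can also differentiate straight through the autograd.numpy operations. Two quick checks, added for illustration: subtracting the max avoids overflow for large inputs, and the gradient of logsumexp is the softmax of its input.

import autograd.numpy as np
from autograd import grad

x = np.array([1000.0, 1001.0, 1002.0])
print(logsumexp(x))              # ~1002.41; the naive np.log(np.sum(np.exp(x))) overflows to inf

g = grad(logsumexp)(x)
softmax = np.exp(x - logsumexp(x))
print(np.allclose(g, softmax))   # True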
Example #24
Source File: beta_geo_fitter.py From lifetimes with MIT License | 5 votes |
def _negative_log_likelihood(
    log_params, freq, rec, T, weights, penalizer_coef
):
    """
    The following method for calculating the *log-likelihood* uses the method
    specified in section 7 of [2]_. More information can also be found in [3]_.

    References
    ----------
    .. [2] Fader, Peter S., Bruce G.S. Hardie, and Ka Lok Lee (2005a),
        "Counting Your Customers the Easy Way: An Alternative to the
        Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
    .. [3] http://brucehardie.com/notes/004/
    """
    warnings.simplefilter(action="ignore", category=FutureWarning)

    params = np.exp(log_params)
    r, alpha, a, b = params

    A_1 = gammaln(r + freq) - gammaln(r) + r * np.log(alpha)
    A_2 = gammaln(a + b) + gammaln(b + freq) - gammaln(b) - gammaln(a + b + freq)
    A_3 = -(r + freq) * np.log(alpha + T)
    A_4 = np.log(a) - np.log(b + np.maximum(freq, 1) - 1) - (r + freq) * np.log(rec + alpha)

    penalizer_term = penalizer_coef * sum(params ** 2)
    ll = weights * (A_1 + A_2 + np.log(np.exp(A_3) + np.exp(A_4) * (freq > 0)))

    return -ll.sum() / weights.sum() + penalizer_term
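Because the parameters enter through np.exp(log_params), this objective can be minimized without bound constraints. A hedged sketch of differentiating it with autograd; the toy customer histories are made up, and it assumes gammaln comes from autograd.scipy.special and warnings is imported, as in the lifetimes module:

import warnings
import autograd.numpy as np
from autograd import grad
from autograd.scipy.special import gammaln   # assumed source of the gammaln used above

# made-up customer histories: repeat count, recency, and observation length
freq    = np.array([0., 2., 5., 1.])
rec     = np.array([0., 10., 30., 5.])
T       = np.array([40., 40., 40., 40.])
weights = np.ones_like(freq)

log_params = np.log(np.array([1., 1., 1., 1.]))   # r, alpha, a, b all start at 1
nll = _negative_log_likelihood(log_params, freq, rec, T, weights, penalizer_coef=0.0)
g = grad(_negative_log_likelihood)(log_params, freq, rec, T, weights, 0.0)
print(nll, g)   # the gradient could be fed to e.g. scipy.optimize.minimize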
Example #25
Source File: convnet.py From autograd with MIT License | 5 votes |
def logsumexp(X, axis, keepdims=False):
    max_X = np.max(X)
    return max_X + np.log(np.sum(np.exp(X - max_X), axis=axis, keepdims=keepdims))
Example #26
Source File: model.py From tree-regularization-public with MIT License | 5 votes |
def safe_log(x, minval=1e-100):
    return np.log(np.maximum(x, minval))
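A one-line check, added for illustration: clamping keeps the log finite at zero, which matters when probabilities can underflow.

import numpy as np

p = np.array([0.5, 1e-120, 0.0])
print(safe_log(p))   # [-0.693..., -230.258..., -230.258...] instead of -inf for the last two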
Example #27
Source File: model.py From tree-regularization-public with MIT License | 5 votes |
def build_mlp(layer_sizes, activation=np.tanh,
              output_activation=lambda x: x):
    """Constructor for multilayer perceptron.

    @param layer_sizes: list of integers
                        list of layer sizes in the perceptron.
    @param activation: function (default: np.tanh)
                       what activation to use after first N - 1 layers.
    @param output_activation: function (default: linear)
                              what activation to use after last layer.
    @return predict: function
                     used to predict y_hat
    @return log_likelihood: function
                            used to compute log likelihood
    @return parser: WeightsParser object
                    object to organize weights
    """
    parser = WeightsParser()
    for i, shape in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        parser.add_shape(('weights', i), shape)
        parser.add_shape(('biases', i), (1, shape[1]))

    def predict(weights, X):
        cur_X = copy(X.T)
        for layer in range(len(layer_sizes) - 1):
            cur_W = parser.get(weights, ('weights', layer))
            cur_B = parser.get(weights, ('biases', layer))
            cur_Z = np.dot(cur_X, cur_W) + cur_B
            cur_X = activation(cur_Z)
        return output_activation(cur_Z.T)

    def log_likelihood(weights, X, y):
        y_hat = predict(weights, X)
        return mse(y.T, y_hat.T)

    return predict, log_likelihood, parser
Example #28
Source File: gamma_gamma_fitter.py From lifetimes with MIT License | 5 votes |
def _negative_log_likelihood(
    log_params, frequency, avg_monetary_value, weights, penalizer_coef
):
    """
    Computes the Negative Log-Likelihood for the Gamma-Gamma Model as in:
    http://www.brucehardie.com/notes/025/

    This also applies a penalizer to the log-likelihood.

    Equivalent to equation (1a).

    Hardie's implementation of this method can be seen on page 8.
    """
    warnings.simplefilter(action="ignore", category=FutureWarning)

    params = np.exp(log_params)
    p, q, v = params

    x = frequency
    m = avg_monetary_value

    negative_log_likelihood_values = (
        gammaln(p * x + q)
        - gammaln(p * x)
        - gammaln(q)
        + q * np.log(v)
        + (p * x - 1) * np.log(m)
        + (p * x) * np.log(x)
        - (p * x + q) * np.log(x * m + v)
    ) * weights
    penalizer_term = penalizer_coef * sum(params ** 2)
    return -negative_log_likelihood_values.sum() / weights.sum() + penalizer_term
Example #29
Source File: beta_geo_fitter.py From lifetimes with MIT License | 5 votes |
def conditional_probability_alive(self, frequency, recency, T):
    """
    Compute conditional probability alive.

    Compute the probability that a customer with history
    (frequency, recency, T) is currently alive.

    From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf

    Parameters
    ----------
    frequency: array or scalar
        historical frequency of customer.
    recency: array or scalar
        historical recency of customer.
    T: array or scalar
        age of the customer.

    Returns
    -------
    array
        value representing a probability
    """
    r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

    log_div = (r + frequency) * np.log((alpha + T) / (alpha + recency)) + np.log(
        a / (b + np.maximum(frequency, 1) - 1)
    )

    return np.atleast_1d(np.where(frequency == 0, 1.0, expit(-log_div)))
Example #30
Source File: eucl_simple_model.py From hyperbolic_cones with Apache License 2.0 | 5 votes |
def _nll_loss_fn(euclidean_dists):
    """
    Parameters
    ----------
    euclidean_dists : numpy.array
        All distances d(u,v) and d(u,v'), where v' is negative. Shape (1 + negative_size).

    Returns
    ----------
    log-likelihood loss function from the NIPS paper, Eq (6).
    """
    exp_negative_distances = grad_np.exp(-euclidean_dists)
    # Remove the value for the true edge (u,v) from the partition function
    return euclidean_dists[0] + grad_np.log(exp_negative_distances[1:].sum())