Python theano.tensor.floor() Examples
The following are 10 code examples of theano.tensor.floor().
You can go to the original project or source file by following the links above each example.
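As a quick orientation before the project examples (a minimal sketch, not taken from any project below): theano.tensor.floor() is an elementwise operator that rounds every element of a tensor down to the nearest integer.

import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], T.floor(x))
print(f([1.7, -0.2, 3.0]))  # -> [ 1. -1.  3.]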
Example #1
Source File: rand.py From iaf with MIT License
def discretized_laplace(mean, logscale, binsize, sample=None):
    scale = .5*T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape) - .5
        sample = mean - scale * T.sgn(u) * T.log(1-2*abs(u))
        sample = T.floor(sample/binsize)*binsize  # discretize the sample
    d = .5*binsize

    def cdf(x):
        z = x-mean
        return .5 + .5 * T.sgn(z) * (1.-T.exp(-abs(z)/scale))

    def logmass1(x):
        # General method for probability mass, but numerically unstable for large |x-mean|/scale
        return T.log(cdf(x+d) - cdf(x-d) + 1e-7)

    def logmass2(x):
        # Only valid for |x-mean| >= d
        return -abs(x-mean)/scale + T.log(T.exp(d/scale)-T.exp(-d/scale)) - np.log(2.).astype(G.floatX)

    def logmass_stable(x):
        switch = (abs(x-mean) < d)
        return switch * logmass1(x) + (1-switch) * logmass2(x)

    logp = logmass_stable(sample).flatten(2).sum(axis=1)
    entr = None  # (1 + logscale).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, scale=scale)
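For context, the two branches of logmass_stable compute the same quantity: with half-bin width d = binsize/2 and the Laplace CDF F defined in cdf(), the mass assigned to a discretized sample x is

\[ P(x) = F(x+d) - F(x-d), \qquad d = \text{binsize}/2 . \]

For |x - mean| >= d the difference telescopes to the closed form used in logmass2,

\[ \log P(x) = -\frac{|x-\text{mean}|}{\text{scale}} + \log\left(e^{d/\text{scale}} - e^{-d/\text{scale}}\right) - \log 2 , \]

which stays numerically stable where the general expression underflows.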
Example #2
Source File: POI2Vec.py From Point-of-Interest-Recommendation with MIT License
def compute_sub_all_scores(self, start_end):
    plu = softmax(T.dot(self.trained_users[start_end], self.trained_items.T))[:, :-1]  # (n_batch, n_item)
    length = T.max(T.sum(self.tes_masks[start_end], axis=1))  # 253
    cidx = T.arange(length).reshape((1, length)) + self.tra_accum_lens[start_end][:, 0].reshape((len(start_end), 1))
    cl = T.sum(self.trained_items[self.tra_context_masks[cidx]], axis=2)  # n_batch x seq_length x n_size
    cl = cl.dimshuffle(1, 2, 0)
    pb = self.trained_branch[self.routes]  # (n_item x 4 x tree_depth x n_size)
    shp0, shp1, shp2 = self.lrs.shape
    lrs = self.lrs.reshape((shp0, shp1, shp2, 1, 1))
    pr_bc = T.dot(pb, cl)
    br = sigmoid(pr_bc * lrs) * T.ceil(abs(pr_bc))  # (n_item x 4 x tree_depth x seq_length x n_batch)
    path = T.prod(br, axis=2) * self.probs.reshape((shp0, shp1, 1, 1))
    del cl, pb, br, lrs
    # paths = T.prod((T.floor(1 - path) + path), axis=1)  # (n_item x seq_length x n_batch)
    paths = T.sum(path, axis=1)
    paths = T.floor(1 - paths) + paths
    p = paths[:-1].T * plu.reshape((plu.shape[0], 1, plu.shape[1]))  # (n_batch x n_item)
    # p = plu.reshape((plu.shape[0], 1, plu.shape[1])) * T.ones((plu.shape[0], length, plu.shape[1]))
    return T.reshape(p, (p.shape[0] * p.shape[1], p.shape[2])).eval()
Example #3
Source File: basic.py From D-VAE with MIT License
def floor(x):
    """
    Elemwise floor of `x`.

    """
    # see decorator for function body
Example #4
Source File: rand.py From iaf with MIT License
def discretized_logistic(mean, logscale, binsize, sample=None):
    scale = T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape)
        _y = T.log(-u/(u-1))  # inverse CDF of the logistic
        sample = mean + scale * _y  # sample from the actual logistic
        sample = T.floor(sample/binsize)*binsize  # discretize the sample
    _sample = (T.floor(sample/binsize)*binsize - mean)/scale
    logps = T.log(T.nnet.sigmoid(_sample + binsize/scale) - T.nnet.sigmoid(_sample) + 1e-7)
    logp = logps.flatten(2).sum(axis=1)
    # raise Exception()
    entr = logscale.flatten(2)
    entr = entr.sum(axis=1) + 2. * entr.shape[1].astype(G.floatX)
    return RandomVariable(sample, logp, entr, mean=mean, logscale=logscale, logps=logps)
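The logistic CDF with location mean and scale is exactly the sigmoid, which is why T.nnet.sigmoid yields the bin mass in closed form: writing z = (x - mean)/scale for the discretized sample,

\[ \log P(x) = \log\left[\sigma\!\left(z + \frac{\text{binsize}}{\text{scale}}\right) - \sigma(z)\right] , \]

with the 1e-7 term guarding against a log of zero. Example #5 below repeats the same construction for a Gaussian, where the CDF is expressed through T.erf instead.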
Example #5
Source File: rand.py From iaf with MIT License
def discretized_gaussian(mean, logvar, binsize, sample=None):
    scale = T.exp(.5*logvar)
    if sample is None:
        _y = G.rng_curand.normal(size=mean.shape)
        sample = mean + scale * _y  # sample from the actual Gaussian
        sample = T.floor(sample/binsize)*binsize  # discretize the sample
    _sample = (T.floor(sample/binsize)*binsize - mean)/scale

    def _erf(x):
        return T.erf(x/T.sqrt(2.))

    logp = T.log(_erf(_sample + binsize/scale) - _erf(_sample) + 1e-7) + T.log(.5)
    logp = logp.flatten(2).sum(axis=1)
    # raise Exception()
    entr = (.5 * (T.log(2 * math.pi) + 1 + logvar)).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, logvar=logvar)
Example #6
Source File: basic.py From attention-lvcsr with MIT License
def floor(x):
    """
    Elemwise floor of `x`.

    """
    # see decorator for function body
Example #7
Source File: theano_backend.py From deepQuest with BSD 3-Clause "New" or "Revised" License
def floor(x):
    return T.floor(x)


# UPDATES OPS
Example #8
Source File: model.py From Diffusion-Probabilistic-Models with MIT License
def generate_forward_diffusion_sample(self, X_noiseless):
    """
    Corrupt a training image with t steps worth of Gaussian noise, and
    return the corrupted image, as well as the mean and covariance of the
    posterior q(x^{t-1}|x^t, x^0).
    """

    X_noiseless = X_noiseless.reshape(
        (-1, self.n_colors, self.spatial_width, self.spatial_width))

    n_images = X_noiseless.shape[0].astype('int16')
    rng = Random().theano_rng
    # choose a timestep in [1, self.trajectory_length-1].
    # note the reverse process is fixed for the very
    # first timestep, so we skip it.
    # TODO for some reason random_integer is missing from the Blocks
    # theano random number generator.
    t = T.floor(rng.uniform(size=(1,1), low=1, high=self.trajectory_length,
        dtype=theano.config.floatX))
    t_weights = self.get_t_weights(t)
    N = rng.normal(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
        dtype=theano.config.floatX)

    # noise added this time step
    beta_forward = self.get_beta_forward(t)
    # decay in noise variance due to original signal this step
    alpha_forward = 1. - beta_forward
    # compute total decay in the fraction of the variance due to X_noiseless
    alpha_arr = 1. - self.beta_arr
    alpha_cum_forward_arr = T.extra_ops.cumprod(alpha_arr).reshape((self.trajectory_length,1))
    alpha_cum_forward = T.dot(t_weights.T, alpha_cum_forward_arr)
    # total fraction of the variance due to noise being mixed in
    beta_cumulative = 1. - alpha_cum_forward
    # total fraction of the variance due to noise being mixed in one step ago
    beta_cumulative_prior_step = 1. - alpha_cum_forward/alpha_forward

    # generate the corrupted training data
    X_uniformnoise = X_noiseless + (rng.uniform(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
        dtype=theano.config.floatX)-T.constant(0.5,dtype=theano.config.floatX))*T.constant(self.uniform_noise,dtype=theano.config.floatX)
    X_noisy = X_uniformnoise*T.sqrt(alpha_cum_forward) + N*T.sqrt(1. - alpha_cum_forward)

    # compute the mean and covariance of the posterior distribution
    mu1_scl = T.sqrt(alpha_cum_forward / alpha_forward)
    mu2_scl = 1. / T.sqrt(alpha_forward)
    cov1 = 1. - alpha_cum_forward/alpha_forward
    cov2 = beta_forward / alpha_forward
    lam = 1./cov1 + 1./cov2
    mu = (X_uniformnoise * mu1_scl / cov1 + X_noisy * mu2_scl / cov2) / lam
    sigma = T.sqrt(1./lam)
    sigma = sigma.reshape((1,1,1,1))

    mu.name = 'mu q posterior'
    sigma.name = 'sigma q posterior'
    X_noisy.name = 'X_noisy'
    t.name = 't'

    return X_noisy, t, mu, sigma
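The T.floor(rng.uniform(...)) call stands in for the missing random_integer noted in the TODO: flooring a uniform draw from [1, trajectory_length) yields an integer-valued timestep in {1, ..., trajectory_length - 1}, each equally likely. A minimal NumPy sketch of the same idea (the trajectory length is an illustrative value, not from the project):

import numpy as np

rng = np.random.default_rng(0)
trajectory_length = 1000  # illustrative value
# floor of Uniform[1, trajectory_length) -> integer-valued t in {1, ..., 999}
t = np.floor(rng.uniform(low=1, high=trajectory_length, size=(1, 1)))
print(t)  # a (1, 1) float array holding a whole number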
Example #9
Source File: optimizer.py From IQA_BIECON_release with MIT License
def get_updates_sgd_momentum(self, cost, params,
                             decay_mode=None, decay=0.,
                             momentum=0.9, nesterov=False,
                             grad_clip=None, constant_clip=True):
    print(' - SGD: lr = %.2e' % (self.lr.get_value(borrow=True)), end='')
    print(', decay = %.2f' % (decay), end='')
    print(', momentum = %.2f' % (momentum), end='')
    print(', nesterov =', nesterov, end='')
    print(', grad_clip =', grad_clip)

    self.grad_clip = grad_clip
    self.constant_clip = constant_clip

    self.iterations = theano.shared(
        np.asarray(0., dtype=theano.config.floatX), borrow=True)

    # lr = self.lr_float
    lr = self.lr * (1.0 / (1.0 + decay * self.iterations))
    # lr = self.lr * (decay ** T.floor(self.iterations / decay_step))

    updates = [(self.iterations, self.iterations + 1.)]

    # Get gradients and apply clipping
    if self.grad_clip is None:
        grads = T.grad(cost, params)
    else:
        assert self.grad_clip > 0
        if self.constant_clip:
            # Constant clipping using theano.gradient.grad_clip
            clip = self.grad_clip
            grads = T.grad(
                theano.gradient.grad_clip(cost, -clip, clip), params)
        else:
            # Adaptive clipping
            clip = self.grad_clip / lr
            grads_ = T.grad(cost, params)
            grads = [T.clip(g, -clip, clip) for g in grads_]

    for p, g in zip(params, grads):
        # v_prev = theano.shared(p.get_value(borrow=True) * 0.)
        p_val = p.get_value(borrow=True)
        v_prev = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
                               broadcastable=p.broadcastable)
        v = momentum * v_prev - lr * g
        updates.append((v_prev, v))

        if nesterov:
            new_p = p + momentum * v - lr * g
        else:
            new_p = p + v

        updates.append((p, new_p))

    return updates
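The only use of floor in this optimizer is the commented-out step-decay schedule: decay ** T.floor(self.iterations / decay_step) cuts the learning rate by a constant factor once every decay_step iterations, in contrast to the smooth 1/(1 + decay*t) schedule that is active. A NumPy sketch of that schedule (all values illustrative; decay_step is the hypothetical step size named in the comment):

import numpy as np

lr0, decay, decay_step = 0.1, 0.5, 1000  # illustrative values
iterations = np.arange(0, 3001, 1000)    # 0, 1000, 2000, 3000
lr = lr0 * decay ** np.floor(iterations / decay_step)
print(lr)  # [0.1  0.05  0.025  0.0125]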
Example #10
Source File: POI2Vec.py From Point-of-Interest-Recommendation with MIT License
def __theano_train__(self, n_size):
    """
    Pr(l|u, C(l)) = Pr(l|u) * Pr(l|C(l))
    Pr(u, l, t) = Pr(l|u, C(l)) if C(l) exists, Pr(l|u) otherwise.
    $Theta$ = argmax Pr(u, l, t)
    """
    tra_mask = T.ivector()
    seq_length = T.sum(tra_mask)  # effective length
    wl = T.concatenate((self.wl, self.wl_m))
    tidx, cidx, bidx, userid = T.ivector(), T.imatrix(), T.itensor3(), T.iscalar()
    pb = self.pb[bidx]    # (seq_length x 4 x depth x n_size)
    lrs = self.lrs[tidx]  # (seq_length x 4 x depth)

    # user preference
    xu = self.xu[userid]
    plu = softmax(T.dot(xu, self.wl.T))

    # geographical influence
    cl = T.sum(wl[cidx], axis=1)  # (seq_length x n_size)
    cl = cl.reshape((cl.shape[0], 1, 1, cl.shape[1]))
    br = sigmoid(T.sum(pb[:seq_length] * cl, axis=3) * lrs[:seq_length]) * T.ceil(abs(T.mean(cl, axis=3)))
    path = T.prod(br, axis=2) * self.probs[tidx][:seq_length]
    # paths = T.prod((T.floor(1-path) + path), axis=1)
    paths = T.sum(path, axis=1)
    paths = T.floor(1 - paths) + paths

    # ----------------------------------------------------------------------------
    # cost, gradients, learning rate, l2 regularization
    lr, l2 = self.alpha_lambda[0], self.alpha_lambda[1]
    seq_l2_sq = T.sum([T.sum(par ** 2) for par in [xu, self.wl]])
    upq = - 1 * T.sum(T.log(plu[tidx[:seq_length]] * paths)) / seq_length
    seq_costs = (
        upq +
        0.5 * l2 * seq_l2_sq)
    seq_grads = T.grad(seq_costs, self.params)
    seq_updates = [(par, par - lr * gra) for par, gra in zip(self.params, seq_grads)]
    pars_subs = [(self.xu, xu), (self.pb, pb)]
    seq_updates.extend([(par, T.set_subtensor(sub, sub - lr * T.grad(seq_costs, sub)))
                        for par, sub in pars_subs])
    # ----------------------------------------------------------------------------

    uidx = T.iscalar()  # T.iscalar() is of type TensorType(int32, )
    self.seq_train = theano.function(
        inputs=[uidx],
        outputs=upq,
        updates=seq_updates,
        givens={
            userid: uidx,
            tidx: self.tra_target_masks[uidx],
            cidx: self.tra_context_masks[T.arange(self.tra_accum_lens[uidx][0], self.tra_accum_lens[uidx][1])],
            bidx: self.routes[self.tra_target_masks[uidx]],
            tra_mask: self.tra_masks[uidx]
            # tra_mask_cot: self.tra_masks_cot[T.arange(self.tra_accum_lens[uidx][0], self.tra_accum_lens[uidx][1])]
        })
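The paths = T.floor(1 - paths) + paths line (also used in Example #2) implements the fallback described in the docstring: for values in [0, 1] it maps 0 to 1 and leaves positive probabilities unchanged, so a target with no context path contributes Pr(l|u) alone to the loss instead of log(0). A quick NumPy check:

import numpy as np

paths = np.array([0.0, 0.3, 1.0])
# floor(1 - p) is 1 when p == 0 and 0 when 0 < p <= 1
print(np.floor(1 - paths) + paths)  # [1.  0.3  1. ]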