Python torch.fmod() Examples
The following are 16 code examples of torch.fmod(), drawn from open-source projects. You can go to the original project or source file by following the link above each example.
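torch.fmod() computes the element-wise remainder of division, truncating toward zero like the C library's fmod, so the result keeps the sign of the dividend. A minimal sketch of that behavior, contrasted with torch.remainder (which keeps the sign of the divisor):

import torch

a = torch.tensor([-3.0, -1.5, 1.5, 3.0])

# fmod truncates toward zero: the result keeps the sign of the dividend.
print(torch.fmod(a, 2.0))       # -> [-1.0, -1.5, 1.5, 1.0]

# remainder floors: the result keeps the sign of the divisor.
print(torch.remainder(a, 2.0))  # -> [1.0, 0.5, 1.5, 1.0]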
Example #1
Source File: learnJointCatPoseModel_top1_new.py From multi-modal-regression with MIT License
def forward(self, x):
    x = self.feature_model(x)
    y0 = self.fc(x)
    Pc = F.softmax(y0, dim=1)
    y1 = torch.stack([self.bin_models[i](x) for i in range(self.num_classes)]).permute(1, 2, 0)
    Pl = F.softmax(y1, dim=1)
    Plc = Pl * torch.unsqueeze(Pc, dim=1)
    ind = torch.argmax(Plc.view(x.size(0), -1), dim=1, keepdim=True)
    # decompose the flat argmax index: integer division gives the pose-bin
    # index ip, fmod gives the category index ic
    ip = ind // self.num_classes
    ic = torch.fmod(ind, self.num_classes)
    label = torch.zeros(ic.size(0), self.num_classes).scatter_(1, ic.data.cpu(), 1.0)
    label = Variable(label.unsqueeze(2).cuda())
    y1 = torch.squeeze(torch.bmm(y1, label), 2)
    if not args.multires:
        y2 = torch.stack([self.res_models[i](x) for i in range(self.num_classes)]).permute(1, 2, 0)
        y2 = torch.squeeze(torch.bmm(y2, label), 2)
    else:
        y2 = torch.stack([self.res_models[i](x) for i in range(self.num_classes * self.num_clusters)])
        y2 = y2.view(self.num_classes, self.num_clusters, -1, self.ndim).permute(1, 2, 3, 0)
        y2 = torch.squeeze(torch.matmul(y2, label), 3)
        pose_label = torch.zeros(ip.size(0), self.num_clusters).scatter_(1, ip.data.cpu(), 1.0)
        pose_label = Variable(pose_label.unsqueeze(2).cuda())
        y2 = torch.squeeze(torch.bmm(y2.permute(1, 2, 0), pose_label), 2)
    return [y0, y1, y2, Plc]  # cat, pose_bin, pose_delta
Example #2
Source File: securenn.py From PySyft with Apache License 2.0
def decompose(tensor, field):
    """decompose a tensor into its binary representation."""
    torch_dtype = get_torch_dtype(field)
    n_bits = get_n_bits(field)
    powers = torch.arange(n_bits, dtype=torch_dtype)
    if hasattr(tensor, "child") and isinstance(tensor.child, dict):
        powers = powers.send(*list(tensor.child.keys()), **no_wrap)
    for _ in range(len(tensor.shape)):
        powers = powers.unsqueeze(0)
    tensor = tensor.unsqueeze(-1)
    moduli = 2 ** powers
    tensor = torch.fmod((tensor / moduli.type_as(tensor)), 2)
    return tensor
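Stripped of PySyft's pointer-tensor plumbing, the idea is to divide by successive powers of two and take fmod 2 to read off each bit. A minimal standalone sketch (the fixed n_bits here stands in for PySyft's get_n_bits(field) helper):

import torch

def decompose_plain(tensor, n_bits=8):
    # powers of two: [1, 2, 4, ..., 2**(n_bits - 1)]
    moduli = 2 ** torch.arange(n_bits)
    # broadcast (..., 1) against (n_bits,) to get one bit per position
    return torch.fmod(tensor.unsqueeze(-1) // moduli, 2)

print(decompose_plain(torch.tensor([5, 6])))
# least-significant bit first: [[1, 0, 1, 0, ...], [0, 1, 1, 0, ...]]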
Example #3
Source File: test_attention.py From translate with BSD 3-Clause "New" or "Revised" License
def _test_attention(self, attention):
    dummy_source_hids = torch.rand(self.src_len, self.bsz, self.ctx_dim)
    dummy_decoder_state = torch.rand(self.bsz, self.dec_dim)
    # fmod makes the dummy lengths cycle through 1..src_len
    dummy_src_lengths = torch.fmod(torch.arange(self.bsz), self.src_len) + 1
    attention(dummy_decoder_state, dummy_source_hids, dummy_src_lengths)
Example #4
Source File: arithmetics.py From heat with MIT License
def fmod(t1, t2):
    """
    Element-wise division remainder of values of operand t1 by values of operand t2
    (i.e. the C library function fmod); not commutative. Takes the two operands
    (scalar or tensor, both may contain floating-point numbers) whose elements are
    to be divided (operand 1 by operand 2) as arguments.

    Parameters
    ----------
    t1: tensor or scalar
        The first operand whose values are divided (may be floats)
    t2: tensor or scalar
        The second operand by whose values t1 is divided (may be floats)

    Returns
    -------
    result: ht.DNDarray
        A tensor containing the remainder of the element-wise division
        (i.e. floating-point values) of t1 by t2. It has the same sign as the
        dividend t1.

    Examples
    --------
    >>> import heat as ht
    >>> ht.fmod(2.0, 2.0)
    tensor([0.])

    >>> T1 = ht.float32([[1, 2], [3, 4]])
    >>> T2 = ht.float32([[2, 2], [2, 2]])
    >>> ht.fmod(T1, T2)
    tensor([[1., 0.],
            [1., 0.]])

    >>> s = 2.0
    >>> ht.fmod(s, T1)
    tensor([[0., 0.],
            [2., 2.]])
    """
    return operations.__binary_op(torch.fmod, t1, t2)
Example #5
Source File: ctc_prefix_score.py From espnet with Apache License 2.0
def index_select_state(self, state, best_ids):
    """Select CTC states according to best ids

    :param state    : CTC state
    :param best_ids : index numbers selected by beam pruning (B, W)
    :return selected_state
    """
    r, s, f_min, f_max, scoring_idmap = state
    # convert ids to BWO space
    vidx = (best_ids + self.pad_bo).view(-1)
    # select hypothesis scores
    s_new = torch.index_select(s.view(-1), 0, vidx)
    s_new = s_new.view(-1, 1).repeat(1, self.odim).view(self.n_bb, self.odim)
    # convert ids to BWS space (S: scoring_num)
    if scoring_idmap is not None:
        snum = self.scoring_num
        # flat ids -> (beam, label): integer division gives the beam, fmod the label
        beam_idx = (torch.div(best_ids, self.odim, rounding_mode="floor") + self.pad_b).view(-1)
        label_ids = torch.fmod(best_ids, self.odim).view(-1)
        score_idx = scoring_idmap[beam_idx, label_ids]
        score_idx[score_idx == -1] = 0
        vidx = score_idx + beam_idx * snum
    else:
        snum = self.odim
    # select forward probabilities
    r_new = torch.index_select(r.view(-1, 2, self.n_bb * snum), 2, vidx).view(
        -1, 2, self.n_bb
    )
    return r_new, s_new, f_min, f_max
Example #6
Source File: evaluateGeodesicRegressionModel.py From multi-modal-regression with MIT License
def myProj(x):
    angle = torch.norm(x, 2, 1, True)
    axis = F.normalize(x)
    angle = torch.fmod(angle, 2 * np.pi)
    return angle * axis

# my model for pose estimation: feature model + 1layer pose model x 12
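Since torch.norm is non-negative here, the fmod simply wraps the rotation-angle magnitude into [0, 2*pi) while F.normalize keeps the axis direction. A quick sketch of the wrap on its own:

import numpy as np
import torch

angle = torch.tensor([1.0, 7.5, 13.0])   # angle magnitudes from some pose vectors
print(torch.fmod(angle, 2 * np.pi))      # -> [1.0000, 1.2168, 0.4336]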
Example #7
Source File: learnJointCatPoseModel_top1_new.py From multi-modal-regression with MIT License
def testing():
    model.eval()
    ytrue_cat, ytrue_pose = [], []
    ypred_cat, ypred_pose = [], []
    for i, sample in enumerate(test_loader):
        xdata = Variable(sample['xdata'].cuda())
        output = model(xdata)
        output_cat = output[0]
        output_bin = output[1]
        output_res = output[2]
        joint_probs = output[3]
        ind = torch.argmax(joint_probs.view(xdata.size(0), -1), dim=1)
        ip = ind // num_classes            # pose-bin index
        ic = torch.fmod(ind, num_classes)  # category index
        tmp_labels = ic.data.cpu().numpy()
        ypred_cat.append(tmp_labels)
        label = Variable(sample['label'])
        ytrue_cat.append(sample['label'].squeeze().numpy())
        ypred_bin = ip.data.cpu().numpy()
        ypred_res = output_res.data.cpu().numpy()
        ypred_pose.append(kmeans_dict[ypred_bin, :] + ypred_res)
        ytrue_pose.append(sample['ydata'].numpy())
        del xdata, label, output, sample, output_cat, output_bin, output_res, joint_probs, ind, ip, ic
        gc.collect()
    ytrue_cat = np.concatenate(ytrue_cat)
    ypred_cat = np.concatenate(ypred_cat)
    ytrue_pose = np.concatenate(ytrue_pose)
    ypred_pose = np.concatenate(ypred_pose)
    model.train()
    return ytrue_cat, ytrue_pose, ypred_cat, ypred_pose
Example #8
Source File: learnGeodesicRegressionModel.py From multi-modal-regression with MIT License
def myProj(x):
    angle = torch.norm(x, 2, 1, True)
    axis = F.normalize(x)
    angle = torch.fmod(angle, np.pi)
    return angle * axis

# my_model
Example #9
Source File: ctc_prefix_score.py From adviser with GNU General Public License v3.0
def index_select_state(self, state, best_ids):
    """Select CTC states according to best ids

    :param state    : CTC state
    :param best_ids : index numbers selected by beam pruning (B, W)
    :return selected_state
    """
    r, s, f_min, f_max, scoring_idmap = state
    # convert ids to BWO space
    vidx = (best_ids + self.pad_bo).view(-1)
    # select hypothesis scores
    s_new = torch.index_select(s.view(-1), 0, vidx)
    s_new = s_new.view(-1, 1).repeat(1, self.odim).view(self.n_bb, self.odim)
    # convert ids to BWS space (S: scoring_num)
    if scoring_idmap is not None:
        snum = self.scoring_num
        # flat ids -> (beam, label): integer division gives the beam, fmod the label
        beam_idx = (torch.div(best_ids, self.odim, rounding_mode="floor") + self.pad_b).view(-1)
        label_ids = torch.fmod(best_ids, self.odim).view(-1)
        score_idx = scoring_idmap[beam_idx, label_ids]
        score_idx[score_idx == -1] = 0
        vidx = score_idx + beam_idx * snum
    else:
        snum = self.odim
    # select forward probabilities
    r_new = torch.index_select(r.view(-1, 2, self.n_bb * snum), 2, vidx).view(-1, 2, self.n_bb)
    return r_new, s_new, f_min, f_max
Example #10
Source File: primary_net.py From HyperNetworks with GNU General Public License v3.0
def __init__(self, z_num, z_dim):
    super(Embedding, self).__init__()
    self.z_list = nn.ParameterList()
    self.z_num = z_num
    self.z_dim = z_dim
    h, k = self.z_num
    for i in range(h):
        for j in range(k):
            self.z_list.append(Parameter(torch.fmod(torch.randn(self.z_dim).cuda(), 2)))
Example #11
Source File: hypernetwork_modules.py From HyperNetworks with GNU General Public License v3.0
def __init__(self, f_size=3, z_dim=64, out_size=16, in_size=16):
    super(HyperNetwork, self).__init__()
    self.z_dim = z_dim
    self.f_size = f_size
    self.out_size = out_size
    self.in_size = in_size
    self.w1 = Parameter(torch.fmod(torch.randn((self.z_dim, self.out_size * self.f_size * self.f_size)).cuda(), 2))
    self.b1 = Parameter(torch.fmod(torch.randn((self.out_size * self.f_size * self.f_size)).cuda(), 2))
    self.w2 = Parameter(torch.fmod(torch.randn((self.z_dim, self.in_size * self.z_dim)).cuda(), 2))
    self.b2 = Parameter(torch.fmod(torch.randn((self.in_size * self.z_dim)).cuda(), 2))
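Both HyperNetworks examples initialize parameters with torch.fmod(torch.randn(...), 2), which keeps each Gaussian sample's sign but folds its magnitude below 2 -- a cheap truncated-normal-style initializer. A minimal sketch of the effect:

import torch

w = torch.fmod(torch.randn(10000), 2)
assert w.abs().max() < 2   # every sample now lies strictly inside (-2, 2)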
Example #12
Source File: decoder.py From transformer-pytorch with MIT License
def update_targets(targets, best_indices, idx, vocab_size):
    # beam-search bookkeeping: integer division recovers the source hypothesis,
    # fmod recovers the token id within the vocabulary
    best_tensor_indices = torch.div(best_indices, vocab_size, rounding_mode="floor")
    best_token_indices = torch.fmod(best_indices, vocab_size)
    new_batch = torch.index_select(targets, 0, best_tensor_indices)
    new_batch[:, idx] = best_token_indices
    return new_batch
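The torch.div/torch.fmod pairing is the standard way to split a flattened top-k or argmax index back into its two coordinates; Examples #1, #5, and #9 use the same trick. An illustrative sketch (shapes chosen arbitrarily):

import torch

vocab_size = 5
scores = torch.rand(3, vocab_size)        # 3 hypotheses x 5 tokens
flat = scores.view(-1).topk(2).indices    # indices into the flattened scores

row = torch.div(flat, vocab_size, rounding_mode="floor")   # which hypothesis
col = torch.fmod(flat, vocab_size)                         # which token
assert torch.equal(row * vocab_size + col, flat)           # round-trips exactly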
Example #13
Source File: test.py From SphericalViewSynthesis with BSD 2-Clause "Simplified" License
def spiral_sampling(grid, percentage):
    b, c, h, w = grid.size()
    N = torch.tensor(h * w * percentage).int().float()
    sampling = torch.zeros_like(grid)[:, 0, :, :].unsqueeze(1)
    phi_k = torch.tensor(0.0).float()
    for k in torch.arange(N - 1):
        k = k.float() + 1.0
        h_k = -1 + 2 * (k - 1) / (N - 1)
        theta_k = torch.acos(h_k)
        phi_k = (
            phi_k + torch.tensor(3.6).float() / torch.sqrt(N) / torch.sqrt(1 - h_k * h_k)
            if k > 1.0
            else torch.tensor(0.0).float()
        )
        # wrap the azimuth into [0, 2*pi) before mapping it to a pixel column
        phi_k = torch.fmod(phi_k, 2 * numpy.pi)
        sampling[:, :, int(theta_k / numpy.pi * h) - 1, int(phi_k / numpy.pi / 2 * w) - 1] += 1.0
    return (sampling > 0).float()
Example #14
Source File: evaluateGeodesicRegressionModel.py From multi-modal-regression with MIT License
def step(self, closure=None):
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        weight_decay = group['weight_decay']
        momentum = group['momentum']
        dampening = group['dampening']
        nesterov = group['nesterov']
        for p in group['params']:
            if p.grad is None:
                continue
            d_p = p.grad.data
            state = self.state[p]
            # State initialization
            if len(state) == 0:
                state['step'] = 0
            state['step'] += 1
            if weight_decay != 0:
                d_p.add_(weight_decay, p.data)
            if momentum != 0:
                param_state = self.state[p]
                if 'momentum_buffer' not in param_state:
                    buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                    buf.mul_(momentum).add_(d_p)
                else:
                    buf = param_state['momentum_buffer']
                    buf.mul_(momentum).add_(1 - dampening, d_p)
                if nesterov:
                    d_p = d_p.add(momentum, buf)
                else:
                    d_p = buf
            # cyclical learning rate: t sweeps through (0, 1] over each cycle of length c
            t = (np.fmod(state['step'] - 1, self.c) + 1) / self.c
            if t <= 0.5:
                step_size = (1 - 2 * t) * group['alpha1'] + 2 * t * group['alpha2']
            else:
                step_size = 2 * (1 - t) * group['alpha2'] + (2 * t - 1) * group['alpha1']
            writer.add_scalar('lr', step_size, state['step'])
            p.data.add_(-step_size, d_p)
    return loss
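The np.fmod call drives a triangular learning-rate cycle: over each cycle of c steps, the rate is interpolated linearly from alpha1 down to alpha2 and back up. A minimal sketch of just the schedule (the values of c, alpha1, and alpha2 are arbitrary here):

import numpy as np

def cyclical_lr(step, c=10, alpha1=0.1, alpha2=0.001):
    t = (np.fmod(step - 1, c) + 1) / c   # position within the cycle, in (0, 1]
    if t <= 0.5:
        return (1 - 2 * t) * alpha1 + 2 * t * alpha2     # descend alpha1 -> alpha2
    return 2 * (1 - t) * alpha2 + (2 * t - 1) * alpha1   # climb back alpha2 -> alpha1

print([round(cyclical_lr(s), 4) for s in range(1, 11)])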
Example #15
Source File: distributions.py From uncertainty_estimation_deep_learning with MIT License
def forward(self, indices, num_classes):
    if not self._random_off_targets:
        # construct one hot
        batch_size = indices.size(0)
        self._one_hot.resize_(batch_size, num_classes).fill_(0.0)
        self._ones.resize_(batch_size, num_classes).fill_(1.0)
        self._one_hot.scatter_(1, indices.view(-1, 1), self._ones)
        one_hot_labels = self._one_hot
        # label smoothing
        smooth_positives = 1.0 - self._label_smoothing
        smooth_negatives = self._label_smoothing / num_classes
        return one_hot_labels * smooth_positives + smooth_negatives
    else:
        # construct one hot
        batch_size = indices.size(0)
        self._one_hot.resize_(batch_size, num_classes).fill_(0.0)
        self._ones.resize_(batch_size, num_classes).fill_(1.0)
        self._one_hot.scatter_(1, indices.view(-1, 1), self._ones)
        positive_labels = self._one_hot
        smooth_positives = 1.0 - self._label_smoothing
        smooth_negatives = self._label_smoothing
        positive_labels = positive_labels * smooth_positives
        negative_labels = 1.0 - self._one_hot
        self._noise.resize_(batch_size, num_classes).uniform_(1e-1, 1.0)
        self._noise = self._noise * negative_labels
        self._noise = smooth_negatives * self._noise / self._noise.sum(dim=1, keepdim=True)
        one_hot_labels = positive_labels + self._noise
        return one_hot_labels
        # label smoothing
        # smooth_positives = 1.0 - self._label_smoothing
        # sum_negatives = self._label_smoothing
        # self._noise.resize_(batch_size, num_classes).random_(1e-5, 1-3)
        # self._noise = self._noise / self._noise.sum()
        # torch.random()
        # batch_size = indices.size(0)
        # self._one_hot.resize_(batch_size, num_classes).fill_(0.0)
        # self._ones.resize_(batch_size, num_classes).fill_(1.0)
        # self._one_hot.scatter_(1, indices.view(-1,1), self._ones)
        # torch.rand(1e-5, smooth_negatives)
        # offsets = torch.from_numpy(np.random.randint(low=1, high=num_classes, size=[batch_size])).cuda()
        # false_indices = torch.fmod((indices + offsets).float(), float(num_classes)).long()
        # self._ones.resize_(batch_size, num_classes).fill_(smooth_negatives)
        # self._one_hot.scatter_(1, false_indices.view(-1,1), self._ones)
        # one_hot_labels = self._one_hot * 1.0
        return one_hot_labels
Example #16
Source File: auto_encoder.py From VQ-VAE with BSD 3-Clause "New" or "Revised" License
def __init__(self, d, k=10, bn=True, vq_coef=1, commit_coef=0.5, num_channels=3, **kwargs):
    super(VQ_CVAE, self).__init__()
    self.encoder = nn.Sequential(
        nn.Conv2d(num_channels, d, kernel_size=4, stride=2, padding=1),
        nn.BatchNorm2d(d),
        nn.ReLU(inplace=True),
        nn.Conv2d(d, d, kernel_size=4, stride=2, padding=1),
        nn.BatchNorm2d(d),
        nn.ReLU(inplace=True),
        ResBlock(d, d, bn),
        nn.BatchNorm2d(d),
        ResBlock(d, d, bn),
        nn.BatchNorm2d(d),
    )
    self.decoder = nn.Sequential(
        ResBlock(d, d),
        nn.BatchNorm2d(d),
        ResBlock(d, d),
        nn.ConvTranspose2d(d, d, kernel_size=4, stride=2, padding=1),
        nn.BatchNorm2d(d),
        nn.ReLU(inplace=True),
        nn.ConvTranspose2d(d, num_channels, kernel_size=4, stride=2, padding=1),
    )
    self.d = d
    self.emb = NearestEmbed(k, d)
    self.vq_coef = vq_coef
    self.commit_coef = commit_coef
    self.mse = 0
    self.vq_loss = torch.zeros(1)
    self.commit_loss = 0
    for l in self.modules():
        if isinstance(l, nn.Linear) or isinstance(l, nn.Conv2d):
            l.weight.detach().normal_(0, 0.02)
            # fold the freshly sampled weights into (-0.04, 0.04); fmod_ works
            # in place, whereas torch.fmod returns a new tensor
            l.weight.detach().fmod_(0.04)
            nn.init.constant_(l.bias, 0)
    self.encoder[-1].weight.detach().fill_(1 / 40)
    self.emb.weight.detach().normal_(0, 0.02)
    self.emb.weight.detach().fmod_(0.04)