Python torch.cos() Examples
The following are 30 code examples of torch.cos().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
torch
, or try the search function
.
Example #1
Source File: common.py From decaNLP with BSD 3-Clause "New" or "Revised" License | 6 votes |
def positional_encodings_like(x, t=None):
    """Return sinusoidal positional encodings matching ``x``.

    ``x`` is a (batch, seq_len, d_model) tensor; ``t`` optionally supplies
    precomputed positions. The result has shape (seq_len, d_model) and is
    wrapped in a ``Variable`` for legacy autograd compatibility.
    """
    if t is not None:
        positions = t
    else:
        positions = torch.arange(0., x.size(1))
        if x.is_cuda:
            positions = positions.cuda(x.get_device())

    encodings = torch.zeros(*x.size()[1:])
    if x.is_cuda:
        encodings = encodings.cuda(x.get_device())

    d_model = x.size(2)
    for ch in range(x.size(-1)):
        # Even channels use sin, odd channels use cos; frequencies decay
        # geometrically with the channel index (Vaswani et al. scheme).
        if ch % 2 == 0:
            encodings[:, ch] = torch.sin(positions / 10000 ** (ch / d_model))
        else:
            encodings[:, ch] = torch.cos(positions / 10000 ** ((ch - 1) / d_model))
    return Variable(encodings)

# torch.matmul can't do (4, 3, 2) @ (4, 2) -> (4, 3)
Example #2
Source File: Transformer.py From ConvLab with MIT License | 6 votes |
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
    """Build the (n_position, d_hid) sinusoid position-encoding table."""

    def angle(pos, dim):
        # Paired dims (2i, 2i+1) share the same wavelength.
        return pos / np.power(10000, 2 * (dim // 2) / d_hid)

    table = np.array([[angle(pos, j) for j in range(d_hid)]
                      for pos in range(n_position)])
    table[:, 0::2] = np.sin(table[:, 0::2])  # dim 2i
    table[:, 1::2] = np.cos(table[:, 1::2])  # dim 2i+1

    if padding_idx is not None:
        # Padding positions get an all-zero encoding.
        table[padding_idx] = 0.

    return torch.FloatTensor(table)
Example #3
Source File: test_compliance_kaldi.py From audio with BSD 2-Clause "Simplified" License | 6 votes |
def _create_data_set(self):
    """Offline helper that regenerates the 'kaldi_file.wav' fixture.

    Not executed during testing; kept for reproducing the asset.
    """
    sr = 16000
    asset_path = common_utils.get_asset_path('kaldi_file.wav')
    t = torch.arange(0, 20).float()
    # Mix of sinusoids, roughly within [-6, 6].
    signal = torch.cos(2 * math.pi * t) + 3 * torch.sin(math.pi * t) + 2 * torch.cos(t)
    # Scale into roughly [-2^30, 2^30].
    signal = (signal / 6 * (1 << 30)).long()
    # Clear the low 16 bits because they aren't used anyways.
    signal = ((signal >> 16) << 16).float()
    torchaudio.save(asset_path, signal, sr)
    sound, sample_rate = torchaudio.load(asset_path, normalization=False)
    print(signal >> 16)
    self.assertTrue(sample_rate == sr)
    torch.testing.assert_allclose(signal, sound)
Example #4
Source File: sinusoidal_positional_embedding.py From crosentgec with GNU General Public License v3.0 | 6 votes |
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
    """Build sinusoidal embeddings (tensor2tensor variant).

    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    Returns a (num_embeddings, embedding_dim) float tensor.
    """
    half_dim = embedding_dim // 2
    # Geometric frequency ladder: exp(-log(10000) * i / (half_dim - 1)).
    step = math.log(10000) / (half_dim - 1)
    freqs = torch.exp(torch.arange(half_dim, dtype=torch.float) * -step)
    args = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * freqs.unsqueeze(0)
    emb = torch.cat([torch.sin(args), torch.cos(args)], dim=1).view(num_embeddings, -1)
    if embedding_dim % 2 == 1:
        # Odd embedding dim: pad one zero column.
        emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
    if padding_idx is not None:
        emb[padding_idx, :] = 0
    return emb
Example #5
Source File: gridgen.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def __init__(self, height, width, lr=1, aux_loss=False):
    """Precompute a spherical (x, y, z, 1) sampling grid of size height x width."""
    super(DenseAffine3DGridGen, self).__init__()
    self.height, self.width = height, width
    self.aux_loss = aux_loss
    self.lr = lr

    # Base grid: channels 0/1 hold normalized row/col coords in [-1, 1),
    # channel 2 is a homogeneous 'ones' plane.
    base = np.zeros([self.height, self.width, 3], dtype=np.float32)
    rows = np.arange(-1, 1, 2.0 / self.height)
    cols = np.arange(-1, 1, 2.0 / self.width)
    base[:, :, 0] = np.expand_dims(np.repeat(np.expand_dims(rows, 0), repeats=self.width, axis=0).T, 0)
    base[:, :, 1] = np.expand_dims(np.repeat(np.expand_dims(cols, 0), repeats=self.height, axis=0), 0)
    base[:, :, 2] = np.ones([self.height, width])
    self.grid = torch.from_numpy(base.astype(np.float32))

    # Spherical angles derived from the normalized grid coordinates.
    self.theta = self.grid[:, :, 0] * np.pi / 2 + np.pi / 2
    self.phi = self.grid[:, :, 1] * np.pi
    self.x = torch.sin(self.theta) * torch.cos(self.phi)
    self.y = torch.sin(self.theta) * torch.sin(self.phi)
    self.z = torch.cos(self.theta)

    # Homogeneous 3-D grid (x, y, z, 1).
    self.grid3d = torch.from_numpy(np.zeros([self.height, self.width, 4], dtype=np.float32))
    self.grid3d[:, :, 0] = self.x
    self.grid3d[:, :, 1] = self.y
    self.grid3d[:, :, 2] = self.z
    self.grid3d[:, :, 3] = self.grid[:, :, 2]
Example #6
Source File: gridgen.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def __init__(self, height, width, lr=1, aux_loss=False):
    """Precompute the spherical sampling grid for the rotating variant."""
    super(DenseAffine3DGridGen_rotate, self).__init__()
    self.height, self.width = height, width
    self.aux_loss = aux_loss
    self.lr = lr

    # Normalized coordinate grid: row coords, col coords, ones plane.
    base = np.zeros([self.height, self.width, 3], dtype=np.float32)
    row_coords = np.arange(-1, 1, 2.0 / self.height)
    col_coords = np.arange(-1, 1, 2.0 / self.width)
    base[:, :, 0] = np.expand_dims(np.repeat(np.expand_dims(row_coords, 0), repeats=self.width, axis=0).T, 0)
    base[:, :, 1] = np.expand_dims(np.repeat(np.expand_dims(col_coords, 0), repeats=self.height, axis=0), 0)
    base[:, :, 2] = np.ones([self.height, width])
    self.grid = torch.from_numpy(base.astype(np.float32))

    # Map grid coordinates to spherical angles, then to unit-sphere xyz.
    self.theta = self.grid[:, :, 0] * np.pi / 2 + np.pi / 2
    self.phi = self.grid[:, :, 1] * np.pi
    self.x = torch.sin(self.theta) * torch.cos(self.phi)
    self.y = torch.sin(self.theta) * torch.sin(self.phi)
    self.z = torch.cos(self.theta)

    # Homogeneous (x, y, z, 1) grid used downstream.
    self.grid3d = torch.from_numpy(np.zeros([self.height, self.width, 4], dtype=np.float32))
    self.grid3d[:, :, 0] = self.x
    self.grid3d[:, :, 1] = self.y
    self.grid3d[:, :, 2] = self.z
    self.grid3d[:, :, 3] = self.grid[:, :, 2]
Example #7
Source File: gridgen.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def __init__(self, height, width, lr=1, aux_loss=False):
    """Precompute the spherical sampling grid for depth-based generation."""
    super(Depth3DGridGen, self).__init__()
    self.height, self.width = height, width
    self.aux_loss = aux_loss
    self.lr = lr

    # Channels: normalized row coords, normalized col coords, ones.
    base = np.zeros([self.height, self.width, 3], dtype=np.float32)
    r = np.arange(-1, 1, 2.0 / self.height)
    c = np.arange(-1, 1, 2.0 / self.width)
    base[:, :, 0] = np.expand_dims(np.repeat(np.expand_dims(r, 0), repeats=self.width, axis=0).T, 0)
    base[:, :, 1] = np.expand_dims(np.repeat(np.expand_dims(c, 0), repeats=self.height, axis=0), 0)
    base[:, :, 2] = np.ones([self.height, width])
    self.grid = torch.from_numpy(base.astype(np.float32))

    # Spherical angles and the corresponding unit-sphere coordinates.
    self.theta = self.grid[:, :, 0] * np.pi / 2 + np.pi / 2
    self.phi = self.grid[:, :, 1] * np.pi
    self.x = torch.sin(self.theta) * torch.cos(self.phi)
    self.y = torch.sin(self.theta) * torch.sin(self.phi)
    self.z = torch.cos(self.theta)

    # Homogeneous (x, y, z, 1) version of the grid.
    self.grid3d = torch.from_numpy(np.zeros([self.height, self.width, 4], dtype=np.float32))
    self.grid3d[:, :, 0] = self.x
    self.grid3d[:, :, 1] = self.y
    self.grid3d[:, :, 2] = self.z
    self.grid3d[:, :, 3] = self.grid[:, :, 2]
Example #8
Source File: kaldi.py From audio with BSD 2-Clause "Simplified" License | 6 votes |
def _feature_window_function(window_type: str,
                             window_size: int,
                             blackman_coeff: float,
                             device: torch.device,
                             dtype: torch.dtype,
                             ) -> Tensor:
    r"""Returns a window function with the given type and size.

    FIX: the ``dtype`` parameter was annotated ``int`` although every call
    site passes a ``torch.dtype``; the annotation is corrected (runtime
    behavior unchanged).
    """
    if window_type == HANNING:
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
    elif window_type == HAMMING:
        return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
    elif window_type == POVEY:
        # like hanning but goes to zero at edges
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
    elif window_type == RECTANGULAR:
        return torch.ones(window_size, device=device, dtype=dtype)
    elif window_type == BLACKMAN:
        a = 2 * math.pi / (window_size - 1)
        window_function = torch.arange(window_size, device=device, dtype=dtype)
        # can't use torch.blackman_window as they use different coefficients
        return (blackman_coeff - 0.5 * torch.cos(a * window_function) +
                (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)).to(device=device, dtype=dtype)
    else:
        raise Exception('Invalid window type ' + window_type)
Example #9
Source File: loss.py From torch-toolbox with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, classes, m=0.5, s=64, easy_margin=True,
             weight=None, size_average=None, ignore_index=-100,
             reduce=None, reduction='mean'):
    """ArcFace margin loss setup.

    classes: number of target classes; m: angular margin in [0, pi/2];
    s: feature-scale factor. The remaining arguments follow the
    nn.CrossEntropyLoss base-class convention.
    """
    super(ArcLoss, self).__init__(weight, size_average, reduce, reduction)
    self.ignore_index = ignore_index
    assert s > 0.
    assert 0 <= m <= (math.pi / 2)
    self.s = s
    self.m = m
    # Cache the trig terms used by the margin rewrite cos(theta + m).
    self.cos_m = math.cos(m)
    self.sin_m = math.sin(m)
    self.mm = math.sin(math.pi - m) * m
    self.threshold = math.cos(math.pi - m)
    self.classes = classes
    self.easy_margin = easy_margin
Example #10
Source File: loss.py From torch-toolbox with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _get_body(self, x, target):
    """Margin adjustment term added to the logits for the target class.

    x: (B, classes) cosine logits; target: (B,) class indices.
    Returns a (B, classes) tensor that is zero off the target column.
    """
    # cos(theta_yi) for each sample's ground-truth class.
    cos_t = torch.gather(x, 1, target.unsqueeze(1))
    margin_src = cos_t if self.easy_margin else cos_t - self.threshold
    cond = torch.relu(margin_src).bool()  # Apex would convert FP16 to FP32 here
    # cos(theta_yi + m), cast back to the input dtype.
    pushed = torch.cos(torch.acos(cos_t) + self.m).type(cos_t.dtype)
    # Fallback when the margin cannot be applied: cos or cos - sin(pi - m)*m.
    fallback = cos_t if self.easy_margin else cos_t - self.mm
    adjusted = torch.where(cond, pushed, fallback)
    one_hot = F.one_hot(target, num_classes=self.classes)
    # Only the target column receives the (adjusted - original) delta.
    return one_hot * (adjusted - cos_t)
Example #11
Source File: attention.py From Extremely-Fine-Grained-Entity-Typing with MIT License | 5 votes |
def __init__(self, d_model, dropout, max_len=5000):
    """Precompute sinusoidal position encodings up to max_len positions."""
    super(PositionalEncoding, self).__init__()
    self.dropout = nn.Dropout(p=dropout)
    # Build the (max_len, d_model) table once, in log space for stability.
    pe = torch.zeros(max_len, d_model)
    pos = torch.arange(0, max_len).unsqueeze(1).float()
    inv_freq = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
    pe[:, 0::2] = torch.sin(pos * inv_freq)
    pe[:, 1::2] = torch.cos(pos * inv_freq)
    # Buffer: moves with .to()/.cuda() but is not a trainable parameter.
    self.register_buffer('pe', pe.unsqueeze(0))
Example #12
Source File: transforms_rbbox.py From AerialDetection with Apache License 2.0 | 5 votes |
def RotBox2Polys_torch(dboxes):
    """Convert rotated boxes (x_ctr, y_ctr, w, h, angle), shape (N, 5),
    into corner polygons (x1, y1, ..., x4, y4), shape (N, 8)."""
    cos_a = torch.cos(dboxes[:, 4])
    sin_a = torch.sin(dboxes[:, 4])
    # The -1 keeps the pixel-inclusive width/height convention.
    half_w = (dboxes[:, 2] - 1) / 2.0
    half_h = (dboxes[:, 3] - 1) / 2.0
    cx = dboxes[:, 0]
    cy = dboxes[:, 1]
    cols = []
    # Box-frame corner offsets, kept in the original corner order.
    for ox, oy in ((half_w, -half_h), (half_w, half_h),
                   (-half_w, half_h), (-half_w, -half_h)):
        cols.append((cx + cos_a * ox - sin_a * oy).unsqueeze(1))
        cols.append((cy + sin_a * ox + cos_a * oy).unsqueeze(1))
    return torch.cat(cols, 1)
Example #13
Source File: plane.py From nsf with MIT License | 5 votes |
def create_circle(num_per_circle, std=0.1):
    """Sample points on a radius-2 circle with Gaussian jitter of scale std."""
    fractions = torch.rand(num_per_circle)  # uniform fractions of a full turn
    xs = torch.cos(2 * np.pi * fractions)
    ys = torch.sin(2 * np.pi * fractions)
    points = 2 * torch.stack((xs, ys)).t()
    points += std * torch.randn(points.shape)
    return points
Example #14
Source File: nn_proc.py From signaltrain with GNU General Public License v3.0 | 5 votes |
def forward(self, x_cuda, knobs_cuda, return_acts=False):
    """Analysis -> knob-conditioned spectral processing -> synthesis.

    Returns (2*y_hat, mag, mag_hat[, layer_acts]); the /2 on input and *2
    on output is a cheap way to keep signals near 'unit variance' of
    -0.5..0.5.
    """
    # Trainable STFT: real & imaginary spectrograms of the halved input.
    x_real, x_imag = self.dft_analysis.forward(x_cuda / 2)
    # Magnitude / phase decomposition.
    mag = torch.norm(torch.cat((x_real.unsqueeze(0), x_imag.unsqueeze(0)), 0), 2, dim=0)
    phs = torch.atan2(x_imag.float(), x_real.float() + 1e-7).to(x_cuda.dtype)
    if return_acts:
        layer_acts = [x_real, x_imag, mag, phs]

    # Magnitude and phase are processed by separate autoencoders.
    mag_hat, m_acts = self.aenc.forward(mag, knobs_cuda, skip_connections='sf', return_acts=return_acts)
    phs_hat, p_acts = self.phs_aenc.forward(phs, knobs_cuda, skip_connections='', return_acts=return_acts)
    if return_acts:
        layer_acts.extend(m_acts)
        layer_acts.extend(p_acts)

    # Residual skip connection on phase: slightly smoother convergence.
    output_phs_dim = phs_hat.size()[1]
    phs_hat = phs_hat + phs[:, -output_phs_dim:, :]

    # Recombine into real/imaginary parts and run the synthesis pass.
    an_real = mag_hat * torch.cos(phs_hat)
    an_imag = mag_hat * torch.sin(phs_hat)
    x_fwdsyn = self.dft_synthesis.forward(an_real, an_imag)

    # Final residual skip from the (halved) input.
    y_hat = x_fwdsyn + x_cuda[:, -x_fwdsyn.size()[-1]:] / 2

    if return_acts:
        layer_acts.extend([mag_hat, phs_hat, an_real, an_imag, x_fwdsyn, y_hat])
        return 2 * y_hat, mag, mag_hat, layer_acts  # undo the /2 at the beginning
    return 2 * y_hat, mag, mag_hat
Example #15
Source File: optimization.py From crosentgec with GNU General Public License v3.0 | 5 votes |
def warmup_cosine(x, warmup=0.002):
    """Cosine LR schedule with linear warmup.

    x: training progress in [0, 1]; warmup: fraction spent ramping up.
    Returns the learning-rate multiplier.
    """
    if x < warmup:
        # Linear ramp from 0 to 1 over the warmup fraction.
        return x / warmup
    # BUGFIX: the original called torch.cos on a plain Python float, which
    # raises a TypeError once warmup ends; math.cos is the scalar equivalent.
    return 0.5 * (1.0 + math.cos(math.pi * x))
Example #16
Source File: annotated_attention.py From Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch with MIT License | 5 votes |
def __init__(self, d_model, dropout, max_len=5000):
    """Cache sinusoidal positional encodings for up to max_len steps."""
    super(PositionalEncoding, self).__init__()
    self.dropout = nn.Dropout(p=dropout)
    # Compute the table once, using the log-space frequency trick.
    table = torch.zeros(max_len, d_model)
    idx = torch.arange(0, max_len).unsqueeze(1)
    inv_freq = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
    table[:, 0::2] = torch.sin(idx * inv_freq)
    table[:, 1::2] = torch.cos(idx * inv_freq)
    self.register_buffer('pe', table.unsqueeze(0))
Example #17
Source File: inverse_warp.py From SfmLearner-Pytorch with MIT License | 5 votes |
def euler2mat(angle):
    """Convert Euler angles to rotation matrices.

    Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174

    Args:
        angle: rotation about the 3 axes, in radians -- size = [B, 3]
    Returns:
        Rotation matrices -- size = [B, 3, 3] (R = Rx @ Ry @ Rz)
    """
    B = angle.size(0)
    x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]

    # Constant planes that broadcast with the per-sample trig values.
    zeros = z.detach() * 0
    ones = zeros.detach() + 1

    def _as_mats(entries):
        # Nine per-sample scalars -> (B, 3, 3) matrices.
        return torch.stack(entries, dim=1).reshape(B, 3, 3)

    cosz, sinz = torch.cos(z), torch.sin(z)
    zmat = _as_mats([cosz, -sinz, zeros,
                     sinz, cosz, zeros,
                     zeros, zeros, ones])

    cosy, siny = torch.cos(y), torch.sin(y)
    ymat = _as_mats([cosy, zeros, siny,
                     zeros, ones, zeros,
                     -siny, zeros, cosy])

    cosx, sinx = torch.cos(x), torch.sin(x)
    xmat = _as_mats([ones, zeros, zeros,
                     zeros, cosx, -sinx,
                     zeros, sinx, cosx])

    return xmat @ ymat @ zmat
Example #18
Source File: dimenet.py From pytorch_geometric with MIT License | 5 votes |
def __init__(self, num_spherical, num_radial, cutoff=5.0,
             envelope_exponent=5):
    """Precompile spherical-harmonic and Bessel basis functions.

    Symbolic forms from bessel_basis/real_sph_harm are lambdified into
    torch-callable functions stored in self.sph_funcs / self.bessel_funcs.
    """
    super(SphericalBasisLayer, self).__init__()
    assert num_radial <= 64
    self.num_spherical = num_spherical
    self.num_radial = num_radial
    self.cutoff = cutoff
    self.envelope = Envelope(envelope_exponent)

    bessel_forms = bessel_basis(num_spherical, num_radial)
    sph_harm_forms = real_sph_harm(num_spherical)
    self.sph_funcs = []
    self.bessel_funcs = []

    x, theta = sym.symbols('x theta')
    modules = {'sin': torch.sin, 'cos': torch.cos}
    for i in range(num_spherical):
        if i == 0:
            # The l=0 harmonic is a constant; evaluate it once and wrap it
            # so the callable still broadcasts to the input's shape.
            first_sph = sym.lambdify([theta], sph_harm_forms[i][0], modules)(0)
            self.sph_funcs.append(lambda x: torch.zeros_like(x) + first_sph)
        else:
            self.sph_funcs.append(
                sym.lambdify([theta], sph_harm_forms[i][0], modules))
        for j in range(num_radial):
            self.bessel_funcs.append(
                sym.lambdify([x], bessel_forms[i][j], modules))
Example #19
Source File: Model.py From 3D-BoundingBox with MIT License | 5 votes |
def OrientationLoss(orient_batch, orientGT_batch, confGT_batch):
    """Negative mean cosine of the angular error on the most confident bin.

    orient_batch / orientGT_batch: (B, bins, 2) [cos, sin] pairs;
    confGT_batch: (B, bins) ground-truth bin confidences.
    """
    batch_size = orient_batch.size()[0]
    # Pick, per sample, the bin the ground truth marks as most confident.
    indexes = torch.max(confGT_batch, dim=1)[1]
    rows = torch.arange(batch_size)
    gt_sel = orientGT_batch[rows, indexes]
    pred_sel = orient_batch[rows, indexes]
    # Recover angles from the (cos, sin) encodings.
    theta_diff = torch.atan2(gt_sel[:, 1], gt_sel[:, 0])
    estimated_theta_diff = torch.atan2(pred_sel[:, 1], pred_sel[:, 0])
    # Perfect alignment gives cos(0)=1, i.e. a loss of -1.
    return -1 * torch.cos(theta_diff - estimated_theta_diff).mean()
Example #20
Source File: Model.py From 3D-BoundingBox with MIT License | 5 votes |
def __init__(self, features=None, bins=2, w=0.4):
    """Three-headed 3D-box model: orientation, confidence, dimensions.

    features: backbone producing 512x7x7 feature maps; bins: number of
    orientation bins; w: loss weight kept for the training loop.
    """
    super(Model, self).__init__()
    self.bins = bins
    self.w = w
    self.features = features

    def _head(hidden, out_dim):
        # Shared two-layer MLP pattern used by every prediction head.
        return nn.Sequential(
            nn.Linear(512 * 7 * 7, hidden),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(hidden, hidden),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(hidden, out_dim),
        )

    self.orientation = _head(256, bins * 2)  # sin and cos per bin
    self.confidence = _head(256, bins)
    self.dimension = _head(512, 3)
Example #21
Source File: transforms_rbbox.py From AerialDetection with Apache License 2.0 | 5 votes |
def RotBox2Polys(dboxes):
    """
    :param dboxes: (x_ctr, y_ctr, w, h, angle) (numboxes, 5)
    :return: quadrangles: (numboxes, 8)
    """
    cos_a = np.cos(dboxes[:, 4])
    sin_a = np.sin(dboxes[:, 4])
    # The -1 keeps the pixel-inclusive width/height convention.
    half_w = (dboxes[:, 2] - 1) / 2.0
    half_h = (dboxes[:, 3] - 1) / 2.0
    cx = dboxes[:, 0]
    cy = dboxes[:, 1]
    cols = []
    # Box-frame corner offsets, kept in the original corner order.
    for ox, oy in ((half_w, -half_h), (half_w, half_h),
                   (-half_w, half_h), (-half_w, -half_h)):
        cols.append((cx + cos_a * ox - sin_a * oy)[:, np.newaxis])
        cols.append((cy + sin_a * ox + cos_a * oy)[:, np.newaxis])
    return np.concatenate(cols, axis=1)
Example #22
Source File: transforms_rbbox.py From AerialDetection with Apache License 2.0 | 5 votes |
def dbbox2delta_v3(proposals, gt, means=[0, 0, 0, 0, 0], stds=[1, 1, 1, 1, 1]):
    """Encode gt rotated boxes relative to proposals (no angle modulo).

    :param proposals: (x_ctr, y_ctr, w, h, angle) shape (n, 5)
    :param gt: (x_ctr, y_ctr, w, h, angle)
    :param means: per-channel normalization means
    :param stds: per-channel normalization stds
    :return: encoded targets, shape (n, 5)
    """
    proposals = proposals.float()
    gt = gt.float()
    prop_w = proposals[..., 2]
    prop_h = proposals[..., 3]
    # Center offset, rotated into the proposal's local frame.
    offset = gt[..., 0:2] - proposals[..., 0:2]
    cos_a = torch.cos(proposals[..., 4])
    sin_a = torch.sin(proposals[..., 4])
    dx = (cos_a * offset[..., 0] + sin_a * offset[..., 1]) / prop_w
    dy = (-sin_a * offset[..., 0] + cos_a * offset[..., 1]) / prop_h
    dw = torch.log(gt[..., 2] / prop_w)
    dh = torch.log(gt[..., 3] / prop_h)
    # Unlike dbbox2delta, the raw angle difference is kept (no 2*pi modulo);
    # e.g. proposals_angle can be -1.5708 while gt_angle is near 5.0153.
    dangle = gt[..., 4] - proposals[..., 4]
    deltas = torch.stack((dx, dy, dw, dh, dangle), -1)
    means_t = deltas.new_tensor(means).unsqueeze(0)
    stds_t = deltas.new_tensor(stds).unsqueeze(0)
    return deltas.sub_(means_t).div_(stds_t)
Example #23
Source File: transforms_rbbox.py From AerialDetection with Apache License 2.0 | 5 votes |
def dbbox2delta(proposals, gt, means=[0, 0, 0, 0, 0], stds=[1, 1, 1, 1, 1]):
    """Encode gt rotated boxes (x_ctr, y_ctr, w, h, angle) w.r.t. proposals.

    :param proposals: shape (n, 5)
    :param gt: shape (n, 5)
    :param means: normalization means for the 5 deltas
    :param stds: normalization stds for the 5 deltas
    :return: encoded targets, shape (n, 5)
    """
    proposals = proposals.float()
    gt = gt.float()
    prop_w = proposals[..., 2]
    prop_h = proposals[..., 3]
    # Rotate the center offset into the proposal's local frame.
    offset = gt[..., 0:2] - proposals[..., 0:2]
    cos_a = torch.cos(proposals[..., 4])
    sin_a = torch.sin(proposals[..., 4])
    dx = (cos_a * offset[..., 0] + sin_a * offset[..., 1]) / prop_w
    dy = (-sin_a * offset[..., 0] + cos_a * offset[..., 1]) / prop_h
    dw = torch.log(gt[..., 2] / prop_w)
    dh = torch.log(gt[..., 3] / prop_h)
    # Angle delta is wrapped into [0, 2*pi) and normalized to [0, 1).
    dangle = (gt[..., 4] - proposals[..., 4]) % (2 * math.pi) / (2 * math.pi)
    deltas = torch.stack((dx, dy, dw, dh, dangle), -1)
    means_t = deltas.new_tensor(means).unsqueeze(0)
    stds_t = deltas.new_tensor(stds).unsqueeze(0)
    # TODO (from upstream): expand bbox regression
    return deltas.sub_(means_t).div_(stds_t)
Example #24
Source File: Embeddings.py From video-caption-openNMT.pytorch with MIT License | 5 votes |
def __init__(self, dropout, dim, max_len=5000):
    """Sinusoidal positional encoding table, cached as buffer 'pe'.

    Shaped (max_len, 1, dim) so it can be added to (len, batch, dim) input.
    """
    # Build the table before initializing the nn.Module machinery,
    # mirroring the original ordering.
    table = torch.zeros(max_len, dim)
    pos = torch.arange(0, max_len).unsqueeze(1)
    inv_freq = torch.exp(torch.arange(0, dim, 2) * -(math.log(10000.0) / dim))
    table[:, 0::2] = torch.sin(pos * inv_freq)
    table[:, 1::2] = torch.cos(pos * inv_freq)
    table = table.unsqueeze(1)
    super(PositionalEncoding, self).__init__()
    self.register_buffer('pe', table)
    self.dropout = nn.Dropout(p=dropout)
    self.dim = dim
Example #25
Source File: box_util.py From H3DNet with MIT License | 5 votes |
def rotz_batch_pytorch(t):
    """Batched rotation matrices about the z-axis.

    (FIX: the original docstring said "y-axis" — copy-pasted from roty.)

    t: tensor of angles, any shape (x1, ..., xn)
    return: (x1, ..., xn, 3, 3) rotation matrices, on t's device
    """
    input_shape = t.shape
    # FIX: allocate on the input's device instead of hard-coding .cuda(),
    # so the function also works on CPU-only machines (unchanged on GPU).
    output = torch.zeros(tuple(list(input_shape) + [3, 3]), device=t.device)
    c = torch.cos(t)
    s = torch.sin(t)
    # Per element: [[c, -s, 0], [s, c, 0], [0, 0, 1]]
    output[..., 0, 0] = c
    output[..., 0, 1] = -s
    output[..., 1, 0] = s
    output[..., 1, 1] = c
    output[..., 2, 2] = 1
    return output
Example #26
Source File: box_util.py From H3DNet with MIT License | 5 votes |
def roty_batch_pytorch(t):
    """Batched rotation matrices about the y-axis.

    t: tensor of angles, any shape (x1, ..., xn)
    return: (x1, ..., xn, 3, 3) rotation matrices, on t's device
    """
    input_shape = t.shape
    # FIX: allocate on the input's device instead of hard-coding .cuda(),
    # so the function also works on CPU-only machines (unchanged on GPU).
    output = torch.zeros(tuple(list(input_shape) + [3, 3]), device=t.device)
    c = torch.cos(t)
    s = torch.sin(t)
    # Per element: [[c, 0, s], [0, 1, 0], [-s, 0, c]]
    output[..., 0, 0] = c
    output[..., 0, 2] = s
    output[..., 1, 1] = 1
    output[..., 2, 0] = -s
    output[..., 2, 2] = c
    return output
Example #27
Source File: box_util.py From H3DNet with MIT License | 5 votes |
def roty_batch(t):
    """Rotation about the y-axis, batched.
    t: (x1,x2,...xn) array of angles
    return: (x1,x2,...,xn,3,3)
    """
    out = np.zeros(tuple(list(t.shape) + [3, 3]))
    cos_t, sin_t = np.cos(t), np.sin(t)
    # Per element: [[c, 0, s], [0, 1, 0], [-s, 0, c]]
    out[..., 0, 0] = cos_t
    out[..., 0, 2] = sin_t
    out[..., 1, 1] = 1
    out[..., 2, 0] = -sin_t
    out[..., 2, 2] = cos_t
    return out
Example #28
Source File: box_util.py From H3DNet with MIT License | 5 votes |
def roty(t):
    """3x3 rotation matrix about the y-axis for scalar angle t."""
    cos_t, sin_t = np.cos(t), np.sin(t)
    return np.array([[cos_t, 0, sin_t],
                     [0, 1, 0],
                     [-sin_t, 0, cos_t]])
Example #29
Source File: Transformer.py From ConvLab with MIT License | 5 votes |
def __init__(self, d_model, max_len=512):
    """Cache sinusoidal position embeddings as a non-trainable buffer."""
    super(PositionalEmbedding, self).__init__()
    # Compute the positional encodings once in log space.
    pe = torch.zeros(max_len, d_model).float()
    # BUGFIX: the original set `pe.require_grad` (a typo), which merely
    # attaches an unused attribute; the real flag is `requires_grad`.
    # Buffers are excluded from autograd anyway, so results are unchanged.
    pe.requires_grad = False
    position = torch.arange(0, max_len).float().unsqueeze(1)
    div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
    pe[:, 0::2] = torch.sin(position * div_term)
    pe[:, 1::2] = torch.cos(position * div_term)
    pe = pe.unsqueeze(0)
    self.register_buffer('pe', pe)
Example #30
Source File: transformer.py From Doc2EDAG with MIT License | 5 votes |
def __init__(self, d_model, dropout, max_len=5000):
    """Fixed sinusoidal positional encoding plus dropout."""
    super(PositionalEncoding, self).__init__()
    self.dropout = nn.Dropout(p=dropout)
    # One-time table computation in log space.
    enc = torch.zeros(max_len, d_model)
    positions = torch.arange(0, max_len).unsqueeze(1)
    scale = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
    enc[:, 0::2] = torch.sin(positions * scale)
    enc[:, 1::2] = torch.cos(positions * scale)
    self.register_buffer('pe', enc.unsqueeze(0))