Python torch.mm() Examples
The following are 30 code examples of torch.mm().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module torch, or try the search function.
Example #1
Source File: test_con.py From SlowFast-Network-pytorch with MIT License | 7 votes |
def test_grad():
    """Sanity-check gradients of multilabel_soft_margin_loss through torch.mm."""
    inp = tensor(([1, 2, 3], [4, 5, 6], [7, 8, 9]), dtype=torch.float)
    # Random 3x4 weight that tracks gradients.
    w = tensor(torch.rand(3, 4), requires_grad=True)
    print(inp, w)
    logits = torch.mm(inp, w)
    margin_loss = nn.MultiLabelMarginLoss()  # constructed but unused, kept from original
    target_soft = tensor(([0, 1, 1, 0],), dtype=torch.float)
    target_hard = tensor(([0, 1, 1, 0], [1, 0, 0, 0], [1, 0, 1, 1]), dtype=torch.long)
    print(logits, target_soft)
    soft_loss = f.multilabel_soft_margin_loss(logits, target_soft, reduction='sum')
    soft_loss.backward()
    print('weight.grad.data1:', w.grad.data)
Example #2
Source File: layers.py From DropEdge with MIT License | 7 votes |
def forward(self, input, adj):
    """Graph convolution: project node features, aggregate over the (sparse)
    adjacency, then optionally add a self-loop term, bias, batch norm and a
    residual connection before the activation."""
    out = torch.spmm(adj, torch.mm(input, self.weight))
    if self.self_weight is not None:
        # Separate learnable transform for each node's own features (self-loop).
        out = out + torch.mm(input, self.self_weight)
    if self.bias is not None:
        out = out + self.bias
    if self.bn is not None:
        out = self.bn(out)
    activated = self.sigma(out)
    return activated + input if self.res else activated
Example #3
Source File: manager.py From gnn-comparison with GNU General Public License v3.0 | 6 votes |
def _vertex_decimation(self, L):
    """Split vertices by the sign of the dominant eigenvector of L and return
    (kept-vertex mask, Kron-reduced Laplacian on the kept side)."""
    dominant = self._power_iteration(L)
    keep = (dominant >= 0).squeeze()
    drop = (dominant < 0).squeeze()
    # print(keep, drop)
    # Swap drop with keep when the keep side is empty (does not change the matrix).
    if torch.sum(keep) == 0.:
        # The matrix is diagonal, cannot reduce further.
        if torch.sum(drop) == 0.:
            assert drop.shape[0] == L.shape[0], (drop.shape, L.shape)
            # drop need not be all ones in general, hence the explicit ones().
            return torch.ones(drop.shape), L
        return drop, L
    L_kk = L[keep][:, keep]
    L_kd = L[keep][:, drop]
    L_dd = L[drop][:, drop]
    L_dk = L[drop][:, keep]
    # Schur complement (Kron reduction) onto the kept vertices.
    reduced = L_kk - torch.mm(torch.mm(L_kd, torch.inverse(L_dd)), L_dk)
    return keep, reduced
Example #4
Source File: torch_utils.py From pruning_yolov3 with GNU General Public License v3.0 | 6 votes |
def fuse_conv_and_bn(conv, bn):
    """Fold a BatchNorm2d into the preceding Conv2d and return the fused layer.

    Derivation: https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    """
    with torch.no_grad():
        fused = torch.nn.Conv2d(conv.in_channels,
                                conv.out_channels,
                                kernel_size=conv.kernel_size,
                                stride=conv.stride,
                                padding=conv.padding,
                                bias=True)
        # Fold the BN scale into the kernel: W' = diag(gamma / sqrt(var + eps)) @ W.
        flat_w = conv.weight.clone().view(conv.out_channels, -1)
        scale = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
        fused.weight.copy_(torch.mm(scale, flat_w).view(fused.weight.size()))
        # Fold the BN shift into the bias: b' = b_conv + beta - gamma*mean/sqrt(var+eps).
        base_bias = conv.bias if conv.bias is not None else torch.zeros(conv.weight.size(0))
        bn_shift = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
        fused.bias.copy_(base_bias + bn_shift)
        return fused
Example #5
Source File: mmd.py From transferlearning with MIT License | 6 votes |
def cmmd(source, target, s_label, t_label, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    """Conditional MMD loss between a source and a target batch, weighting each
    Gaussian-kernel block by label co-occurrence.

    NOTE(review): batch size (32) and class count (31) are hard-coded below —
    presumably Office-31 with batch 32; confirm before reuse. `Variable` is
    deprecated in modern PyTorch, and `.cuda()` assumes a GPU is present.
    """
    # One-hot encode the source labels (32 samples x 31 classes).
    s_label = s_label.cpu()
    s_label = s_label.view(32,1)
    s_label = torch.zeros(32, 31).scatter_(1, s_label.data, 1)
    s_label = Variable(s_label).cuda()
    # One-hot encode the target (pseudo-)labels the same way.
    t_label = t_label.cpu()
    t_label = t_label.view(32, 1)
    t_label = torch.zeros(32, 31).scatter_(1, t_label.data, 1)
    t_label = Variable(t_label).cuda()
    batch_size = int(source.size()[0])
    # Joint multi-bandwidth Gaussian kernel over the stacked [source; target] rows.
    kernels = guassian_kernel(source, target,
                              kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
    loss = 0
    XX = kernels[:batch_size, :batch_size]
    YY = kernels[batch_size:, batch_size:]
    XY = kernels[:batch_size, batch_size:]
    # Label-agreement matrices (one-hot outer products) reweight each block.
    loss += torch.mean(torch.mm(s_label, torch.transpose(s_label, 0, 1)) * XX +
                       torch.mm(t_label, torch.transpose(t_label, 0, 1)) * YY -
                       2 * torch.mm(s_label, torch.transpose(t_label, 0, 1)) * XY)
    return loss
Example #6
Source File: mmd.py From transferlearning with MIT License | 6 votes |
def _mix_rbf_kernel(X, Y, sigma_list):
    """Evaluate a mixture-of-RBF kernel on the stacked rows of X and Y.

    Returns (K_XX, K_XY, K_YY, number of bandwidths)."""
    assert(X.size(0) == Y.size(0))
    m = X.size(0)
    Z = torch.cat((X, Y), 0)
    gram = torch.mm(Z, Z.t())
    sq_norms = torch.diag(gram).unsqueeze(1).expand_as(gram)
    # ||z_i - z_j||^2 = ||z_i||^2 - 2 z_i.z_j + ||z_j||^2
    sq_dists = sq_norms - 2 * gram + sq_norms.t()
    K = 0.0
    for sigma in sigma_list:
        K = K + torch.exp(-(1.0 / (2 * sigma ** 2)) * sq_dists)
    return K[:m, :m], K[:m, m:], K[m:, m:], len(sigma_list)
Example #7
Source File: model.py From LiteralE with Apache License 2.0 | 6 votes |
def forward(self, e1, rel):
    """ConvE-style scoring: reshape entity/relation embeddings to 2-D maps,
    stack and convolve them, project back to embedding space, then score
    against every entity embedding at once. Returns per-entity probabilities.

    FIX: F.sigmoid is deprecated (removed in modern PyTorch) — use torch.sigmoid.
    """
    e1_embedded = self.emb_e(e1).view(Config.batch_size, 1, 10, 20)
    rel_embedded = self.emb_rel(rel).view(Config.batch_size, 1, 10, 20)
    # Stack entity and relation feature maps along the height axis.
    stacked_inputs = torch.cat([e1_embedded, rel_embedded], 2)
    stacked_inputs = self.bn0(stacked_inputs)
    x = self.inp_drop(stacked_inputs)
    x = self.conv1(x)
    x = self.bn1(x)
    x = F.relu(x)
    x = self.feature_map_drop(x)
    x = x.view(Config.batch_size, -1)
    x = self.fc(x)
    x = self.hidden_drop(x)
    x = self.bn2(x)
    x = F.relu(x)
    # Score against every entity embedding in one matmul, plus per-entity bias.
    x = torch.mm(x, self.emb_e.weight.transpose(1, 0))
    x += self.b.expand_as(x)
    pred = torch.sigmoid(x)
    return pred
Example #8
Source File: model.py From LiteralE with Apache License 2.0 | 6 votes |
def forward(self, e1, rel):
    """ComplEx scoring: complex-space bilinear product (equivalent to HolE)
    of the entity/relation embeddings against every candidate entity.
    Returns per-entity probabilities.

    FIXES:
    - the original applied input dropout twice to each embedding (once at
      lookup and once again afterwards), compounding the drop probability
      during training; dropout is now applied exactly once (eval behavior
      is unchanged, since dropout is the identity there).
    - F.sigmoid is deprecated — use torch.sigmoid.
    """
    e1_embedded_real = self.emb_e_real(e1).view(Config.batch_size, -1)
    rel_embedded_real = self.emb_rel_real(rel).view(Config.batch_size, -1)
    e1_embedded_img = self.emb_e_img(e1).view(Config.batch_size, -1)
    rel_embedded_img = self.emb_rel_img(rel).view(Config.batch_size, -1)
    e1_embedded_real = self.inp_drop(e1_embedded_real)
    rel_embedded_real = self.inp_drop(rel_embedded_real)
    e1_embedded_img = self.inp_drop(e1_embedded_img)
    rel_embedded_img = self.inp_drop(rel_embedded_img)
    # complex space bilinear product (equivalent to HolE)
    realrealreal = torch.mm(e1_embedded_real * rel_embedded_real, self.emb_e_real.weight.transpose(1, 0))
    realimgimg = torch.mm(e1_embedded_real * rel_embedded_img, self.emb_e_img.weight.transpose(1, 0))
    imgrealimg = torch.mm(e1_embedded_img * rel_embedded_real, self.emb_e_img.weight.transpose(1, 0))
    imgimgreal = torch.mm(e1_embedded_img * rel_embedded_img, self.emb_e_real.weight.transpose(1, 0))
    pred = realrealreal + realimgimg + imgrealimg - imgimgreal
    return torch.sigmoid(pred)
Example #9
Source File: utils_regularizers.py From KAIR with MIT License | 6 votes |
def regularizer_orth2(m):
    """
    # ----------------------------------------
    # Soft orthogonality regularizer for conv layers: nudge outlying singular
    # values of the (reshaped) kernel back towards their mean.
    # Intended to be called via torch.nn.Module.apply(), which applies it to
    # every layer of the model:  net.apply(regularizer_orth2)
    # Non-conv modules are left untouched.
    # ----------------------------------------
    """
    if m.__class__.__name__.find('Conv') == -1:
        return
    w = m.weight.data.clone()
    c_out, c_in, f1, f2 = w.size()
    # dtype = m.weight.data.type()
    # Flatten to (f1*f2*c_in, c_out) so each output channel is one column.
    w = w.permute(2, 3, 1, 0).contiguous().view(f1 * f2 * c_in, c_out)
    u, s, v = torch.svd(w)
    s_mean = s.mean()
    # Shrink singular values far above the mean; grow those far below it.
    s[s > 1.5 * s_mean] = s[s > 1.5 * s_mean] - 1e-4
    s[s < 0.5 * s_mean] = s[s < 0.5 * s_mean] + 1e-4
    w = torch.mm(torch.mm(u, torch.diag(s)), v.t())
    m.weight.data = w.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1)  # .type(dtype)
Example #10
Source File: beamable_mm.py From fairseq with MIT License | 6 votes |
def forward(self, input1, input2):
    """Beam-aware batched matmul: at inference, with a beam size configured and
    a single-step batched input, collapse the beam into the batch so one
    mm/bmm covers each hypothesis group."""
    beamable = (
        not self.training                  # test mode
        and self.beam_size is not None     # beam size is set
        and input1.dim() == 3              # only support batched input
        and input1.size(1) == 1            # single time step update
    )
    if not beamable:
        return input1.bmm(input2)
    bsz, beam = input1.size(0), self.beam_size
    # bsz x 1 x nhu --> bsz/beam x beam x nhu
    lhs = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
    # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
    rhs = input2.unfold(0, beam, beam)[:, :, :, 0]
    if lhs.size(0) == 1:
        # Whole batch is a single beam: plain mm beats bmm.
        out = torch.mm(lhs[0, :, :], rhs[0, :, :])
    else:
        out = lhs.bmm(rhs)
    return out.view(bsz, 1, -1)
Example #11
Source File: utilz.py From tamil-lm2 with GNU General Public License v2.0 | 6 votes |
def dump_cosine_similarity_tsv(config, vocab, embedding, filepath, count=100):
    """Dump, for every vocabulary word, its `count` most similar and most
    dissimilar words under cosine similarity.

    Side effects: pickles the full similarity matrix to '*.matrix.pkl' and
    writes '|'-separated word lists to '*.similar.tsv' / '*.dissimilar.tsv'.
    """
    assert embedding.shape[0] == len(vocab)
    matrix_filepath = filepath.replace('.tsv', '.matrix.pkl')
    similar_filepath = filepath.replace('.tsv', '.similar.tsv')
    dissimilar_filepath = filepath.replace('.tsv', '.dissimilar.tsv')
    # Row-normalise so the Gram matrix is cosine similarity.
    normed = embedding / embedding.norm(dim=1)[:, None]
    scores = torch.mm(normed, normed.t())
    pickle.dump(scores.cpu().numpy(), open(matrix_filepath, 'wb'))
    similars = scores.topk(count, dim=1)[1]
    dissimilars = (1 - scores).topk(count, dim=1)[1]
    with open(similar_filepath, 'w') as sim_out, open(dissimilar_filepath, 'w') as dis_out:
        for idx in range(len(vocab)):
            sim_out.write('|'.join(vocab.index2word[j] for j in similars[idx]) + '\n')
            dis_out.write('|'.join(vocab.index2word[j] for j in dissimilars[idx]) + '\n')
Example #12
Source File: beamable_mm.py From crosentgec with GNU General Public License v3.0 | 6 votes |
def forward(self, input1, input2):
    """Matrix product specialised for beam search: during evaluation, when a
    beam size is set and input1 is a batched single-step tensor, fold the
    beam into the batch dimension and multiply per hypothesis group."""
    if (not self.training
            and self.beam_size is not None
            and input1.dim() == 3
            and input1.size(1) == 1):
        bsz = input1.size(0)
        beam = self.beam_size
        # bsz x 1 x nhu --> bsz/beam x beam x nhu
        grouped1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
        # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
        grouped2 = input2.unfold(0, beam, beam)[:, :, :, 0]
        if grouped1.size(0) == 1:
            # Single group: use the cheaper non-batched mm.
            product = torch.mm(grouped1[0, :, :], grouped2[0, :, :])
        else:
            product = grouped1.bmm(grouped2)
        return product.view(bsz, 1, -1)
    return input1.bmm(input2)
Example #13
Source File: sgcn.py From SGCN with GNU General Public License v3.0 | 6 votes |
def calculate_regression_loss(self, z, target):
    """
    Calculating the regression loss for all pairs of nodes.
    :param z: Hidden vertex representations (unused here; kept for interface).
    :param target: Target vector.
    :return loss_term: Regression loss.
    :return predictions_soft: Predictions for each vertex pair.
    """
    # Pair embeddings: positives, negatives, then the four surrogate pairings.
    pair_groups = [
        (self.positive_z_i, self.positive_z_j),
        (self.negative_z_i, self.negative_z_j),
        (self.negative_z_i, self.negative_z_k),
        (self.negative_z_j, self.negative_z_k),
        (self.positive_z_i, self.positive_z_k),
        (self.positive_z_j, self.positive_z_k),
    ]
    features = torch.cat([torch.cat(pair, 1) for pair in pair_groups])
    predictions = torch.mm(features, self.regression_weights)
    predictions_soft = F.log_softmax(predictions, dim=1)
    loss_term = F.nll_loss(predictions_soft, target)
    return loss_term, predictions_soft
Example #14
Source File: transforms.py From torch-toolbox with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __call__(self, tensor):
    """
    Whiten an image tensor: subtract the mean vector, then apply the
    precomputed transformation matrix to the flattened pixels.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be whitened.
    Returns:
        Tensor: Transformed image (same shape as the input).
    Raises:
        ValueError: if C*H*W does not match the transformation matrix size.
    """
    numel = tensor.size(0) * tensor.size(1) * tensor.size(2)
    if numel != self.transformation_matrix.size(0):
        raise ValueError(
            "tensor and transformation matrix have incompatible shape." +
            "[{} x {} x {}] != ".format(*tensor.size()) +
            "{}".format(self.transformation_matrix.size(0)))
    centered = tensor.view(1, -1) - self.mean_vector
    whitened = torch.mm(centered, self.transformation_matrix)
    return whitened.view(tensor.size())
Example #15
Source File: modules.py From BAMnet with Apache License 2.0 | 6 votes |
def forward(self, query_embed, in_memory_embed, atten_mask=None):
    """Compute attention scores of a query over memory slots.

    Supports 'simple' (dot product), 'mul' (multiplicative) and 'add'
    (additive) attention; masked slots are pushed to -INF before any softmax
    the caller applies."""
    if self.atten_type == 'simple':
        # simple attention: plain dot product with the memory
        attention = torch.bmm(in_memory_embed, query_embed.unsqueeze(2)).squeeze(2)
    elif self.atten_type == 'mul':
        # multiplicative attention: project the query first
        attention = torch.bmm(in_memory_embed, torch.mm(query_embed, self.W).unsqueeze(2)).squeeze(2)
    elif self.atten_type == 'add':
        # additive attention: tanh of projected memory + projected query
        hidden = torch.tanh(
            torch.mm(in_memory_embed.view(-1, in_memory_embed.size(-1)), self.W2)
            .view(in_memory_embed.size(0), -1, self.W2.size(-1))
            + torch.mm(query_embed, self.W).unsqueeze(1))
        attention = torch.mm(hidden.view(-1, hidden.size(-1)), self.W3).view(hidden.size(0), -1)
    else:
        raise RuntimeError('Unknown atten_type: {}'.format(self.atten_type))
    if atten_mask is not None:
        # Exclude masked elements from the softmax
        attention = atten_mask * attention - (1 - atten_mask) * INF
    return attention
Example #16
Source File: gcn_spmv.py From dgl with Apache License 2.0 | 6 votes |
def forward(self, h):
    """GCN layer over a DGL graph with symmetric degree normalisation:
    h' = act(norm * (A @ (norm * (h W))) + b), where `norm` is the per-node
    normalisation factor stored in g.ndata['norm']."""
    if self.dropout:
        h = self.dropout(h)
    h = torch.mm(h, self.weight)
    # normalization by square root of src degree
    h = h * self.g.ndata['norm']
    self.g.ndata['h'] = h
    # Message passing: copy 'h' along each edge and sum into the destination.
    self.g.update_all(fn.copy_src(src='h', out='m'),
                      fn.sum(msg='m', out='h'))
    h = self.g.ndata.pop('h')
    # normalization by square root of dst degree
    h = h * self.g.ndata['norm']
    # bias
    if self.bias is not None:
        h = h + self.bias
    if self.activation:
        h = self.activation(h)
    return h
Example #17
Source File: manager.py From gnn-comparison with GNU General Public License v3.0 | 6 votes |
def _power_iteration(self, A, num_simulations=30):
    """Approximate the dominant eigenvector of A by power iteration.

    The start vector is random, which makes it (almost surely) not
    orthogonal to the dominant eigenvector."""
    vec = torch.rand(A.shape[1]).unsqueeze(dim=1) * 0.5 - 1
    for _ in range(num_simulations):
        # Multiply by A, then re-normalise to unit length.
        unnormalized = torch.mm(A, vec)
        vec = unnormalized / torch.norm(unnormalized)
    return vec
Example #18
Source File: model.py From LiteralE with Apache License 2.0 | 6 votes |
def forward(self, e1, rel):
    """DistMult-style scoring with numerical literals: entity embeddings are
    concatenated with their numeric attributes and re-embedded before the
    bilinear product against every candidate entity. Returns per-entity
    probabilities of shape (batch, num_entities).

    FIX: F.sigmoid is deprecated (removed in modern PyTorch) — use torch.sigmoid.
    """
    e1_emb = self.emb_e(e1).view(-1, self.emb_dim)
    rel_emb = self.emb_rel(rel).view(-1, self.emb_dim)
    # Begin literals: fuse each entity embedding with its numeric attributes.
    e1_num_lit = self.numerical_literals[e1.view(-1)]
    e1_emb = self.emb_num_lit(torch.cat([e1_emb, e1_num_lit], 1))
    e2_multi_emb = self.emb_num_lit(torch.cat([self.emb_e.weight, self.numerical_literals], 1))
    # End literals
    e1_emb = self.inp_drop(e1_emb)
    rel_emb = self.inp_drop(rel_emb)
    pred = torch.mm(e1_emb * rel_emb, e2_multi_emb.t())
    return torch.sigmoid(pred)
Example #19
Source File: set2set.py From LanczosNetwork with MIT License | 6 votes |
def forward(self, input_set):
    """
    Set2Vec-style encoder: run an LSTM controller with content-based
    attention over the (unordered) input set for a fixed number of steps.

    Args:
        input_set: shape N X D
    Returns:
        output_vec: shape 1 X 2D
    """
    num_element, element_dim = input_set.shape[0], input_set.shape[1]
    assert element_dim == self.element_dim
    hidden = torch.zeros(1, 2 * self.element_dim).to(input_set.device)
    memory = torch.zeros(1, self.element_dim).to(input_set.device)
    for _ in range(self.num_step_encoder):
        hidden, memory = self.LSTM(hidden, memory)
        # Attention energies over the set, conditioned on the controller state.
        energy = torch.tanh(torch.mm(hidden, self.W_1) + input_set).mm(self.W_2)
        att_weight = F.softmax(energy, dim=0)
        read = (input_set * att_weight).sum(dim=0, keepdim=True)
        # Next controller input = [state, attention read-out].
        hidden = torch.cat([hidden, read], dim=1)
    return hidden
Example #20
Source File: layers.py From graph-cnn.pytorch with MIT License | 6 votes |
def forward(self, input, adj):
    """Graph attention (GAT) layer: score edges with decomposed additive
    attention, softmax over the neighbours given by adj, then aggregate."""
    h = torch.mm(input, self.W)
    # e_ij = leakyrelu(a1 . h_i + a2 . h_j), built from two rank-1 terms.
    f_1 = torch.matmul(h, self.a1)
    f_2 = torch.matmul(h, self.a2)
    e = self.leakyrelu(f_1 + f_2.transpose(0, 1))
    # Mask non-edges with a large negative value before the softmax.
    neg_inf = -9e15 * torch.ones_like(e)
    attention = F.softmax(torch.where(adj > 0, e, neg_inf), dim=1)
    attention = F.dropout(attention, self.dropout, training=self.training)
    h_prime = torch.matmul(attention, h)
    return F.elu(h_prime) if self.concat else h_prime
Example #21
Source File: infer.py From BERT-Relation-Extraction with Apache License 2.0 | 6 votes |
def evaluate(self):
    """Nearest-neighbour evaluation over the training loader: embed each batch,
    take dot-product similarity of the last sample against the others, and
    count a hit when the nearest neighbour carries the same label.
    Returns the tensors of the final batch for inspection."""
    counts, hits = 0, 0
    logger.info("Evaluating...")
    with torch.no_grad():
        for meta_input, e1_e2_start, meta_labels in tqdm(self.train_loader, total=len(self.train_loader)):
            # Mask out padding positions; single-segment token type ids.
            attention_mask = (meta_input != self.pad_id).float()
            token_type_ids = torch.zeros((meta_input.shape[0], meta_input.shape[1])).long()
            if self.cuda:
                meta_input = meta_input.cuda()
                attention_mask = attention_mask.cuda()
                token_type_ids = token_type_ids.cuda()
            outputs = self.net(meta_input, token_type_ids=token_type_ids, attention_mask=attention_mask, Q=None,
                               e1_e2_start=e1_e2_start)
            # Dot-product similarity of the last sample vs. all earlier ones.
            matrix_product = torch.mm(outputs, outputs.T)
            closest_idx = matrix_product[-1][:-1].argmax().cpu().item()
            if closest_idx == meta_labels[-1].item():
                hits += 1
            counts += 1
    print("Results (%d samples): %.3f %%" % (counts, (hits/counts)*100))
    # Loop variables survive the loop: the last batch is returned.
    return meta_input, e1_e2_start, meta_labels, outputs
Example #22
Source File: evaluators.py From Dispersion-based-Clustering with MIT License | 6 votes |
def pairwise_distance(features, query=None, gallery=None, metric=None):
    """Squared Euclidean pairwise distances between feature vectors.

    With no query/gallery: all-pairs n x n distances within `features` (a
    mapping of id -> feature tensor). With query and gallery (lists of
    4-tuples whose first item keys into `features`): m x n cross distances.
    `metric`, if given, transforms the features first.

    FIXES:
    - the all-pairs branch computed 2*||x_i||^2 - 2*x_i.x_j (not a distance,
      and not symmetric); it now uses ||x_i||^2 + ||x_j||^2 - 2*x_i.x_j.
    - addmm_ used the deprecated positional (beta, alpha) signature; it now
      uses the keyword form.
    """
    if query is None and gallery is None:
        n = len(features)
        x = torch.cat(list(features.values()))
        x = x.view(n, -1)
        if metric is not None:
            x = metric.transform(x)
        sq = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = sq + sq.t() - 2 * torch.mm(x, x.t())
        return dist
    x = torch.cat([features["".join(f)].unsqueeze(0) for f, _, _, _ in query], 0)
    y = torch.cat([features["".join(f)].unsqueeze(0) for f, _, _, _ in gallery], 0)
    m, n = x.size(0), y.size(0)
    x = x.view(m, -1)
    y = y.view(n, -1)
    if metric is not None:
        x = metric.transform(x)
        y = metric.transform(y)
    dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # dist = dist - 2 * x @ y.T, computed in place.
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    return dist
Example #23
Source File: biaffine.py From BiaffineDependencyParsing with MIT License | 6 votes |
def __init__(self, input1_size, input2_size, output_size):
    """
    The variant in active use.
    :param input1_size: feature size of the first input (e.g. head representation)
    :param input2_size: feature size of the second input (e.g. dependent representation)
    :param output_size: size of the biaffine classification space
    """
    super().__init__()
    # Why "+ 1" on both feature sizes (bias term folded into the weights):
    # Matrix form of the biaffine transform:
    # [(batch_size*seq_len), (head_feat_size+1)] * [(head_feat_size+1), ((dep_feat_size+1))*output_size]
    #   mm-> [(batch_size*seq_len), ((dep_feat_size+1))*output_size]
    # [(batch_size*seq_len), ((dep_feat_size+1))*output_size]
    #   view-> [batch_size, (seq_len*output_size), (dep_feat_size+1)]
    # [batch_size, (seq_len*output_size), (dep_feat_size+1)] * [batch_size, (dep_feat_size+1), seq_len]
    #   bmm-> [batch_size, (seq_len*output_size), seq_len]
    # [batch_size, (seq_len*output_size), seq_len]
    #   view-> [batch_size, seq_len, seq_len, output_size]
    self.W_bilin = PairwiseBilinear(input1_size + 1, input2_size + 1, output_size)
    # Start from an all-zero transform.
    self.W_bilin.weight.data.zero_()
    self.W_bilin.bias.data.zero_()
Example #24
Source File: model.py From LiteralE with Apache License 2.0 | 6 votes |
def forward(self, e1, rel):
    """DistMult-style scoring with gated literal embeddings: each entity
    embedding is combined with its numeric attributes via emb_num_lit before
    the bilinear product against every candidate entity. Returns per-entity
    probabilities of shape (batch, num_entities).

    FIX: F.sigmoid is deprecated (removed in modern PyTorch) — use torch.sigmoid.
    """
    e1_emb = self.emb_e(e1).view(-1, self.emb_dim)
    rel_emb = self.emb_rel(rel).view(-1, self.emb_dim)
    # Begin literals: gate each entity embedding with its numeric attributes.
    e1_num_lit = self.numerical_literals[e1.view(-1)]
    e1_emb = self.emb_num_lit(e1_emb, e1_num_lit)
    e2_multi_emb = self.emb_num_lit(self.emb_e.weight, self.numerical_literals)
    # End literals
    e1_emb = self.inp_drop(e1_emb)
    rel_emb = self.inp_drop(rel_emb)
    pred = torch.mm(e1_emb * rel_emb, e2_multi_emb.t())
    return torch.sigmoid(pred)
Example #25
Source File: sgcn.py From SGCN with GNU General Public License v3.0 | 6 votes |
def score_model(self, epoch):
    """
    Score the model on the test set edges in each epoch.
    :param epoch: Epoch number.
    """
    loss, self.train_z = self.model(self.positive_edges, self.negative_edges, self.y)
    # Test edges as 2 x E index tensors on the target device.
    score_positive_edges = torch.from_numpy(np.array(self.test_positive_edges, dtype=np.int64).T).type(torch.long).to(self.device)
    score_negative_edges = torch.from_numpy(np.array(self.test_negative_edges, dtype=np.int64).T).type(torch.long).to(self.device)
    # Edge representation = concatenation of its two endpoint embeddings.
    test_positive_z = torch.cat((self.train_z[score_positive_edges[0, :], :], self.train_z[score_positive_edges[1, :], :]), 1)
    test_negative_z = torch.cat((self.train_z[score_negative_edges[0, :], :], self.train_z[score_negative_edges[1, :], :]), 1)
    scores = torch.mm(torch.cat((test_positive_z, test_negative_z), 0), self.model.regression_weights.to(self.device))
    # NOTE(review): exp(softmax(...)) is unusual (softmax already normalises);
    # kept as-is since the downstream AUC ranking is monotone in it — confirm intent.
    probability_scores = torch.exp(F.softmax(scores, dim=1))
    predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
    predictions = predictions.cpu().detach().numpy()
    # Positive test edges first, then negative — must match the cat() order above.
    targets = [0]*len(self.test_positive_edges) + [1]*len(self.test_negative_edges)
    auc, f1 = calculate_auc(targets, predictions, self.edges)
    self.logs["performance"].append([epoch+1, auc, f1])
Example #26
Source File: models.py From Extremely-Fine-Grained-Entity-Typing with MIT License | 5 votes |
def cosine_similarity(x1, x2=None, eps=1e-8):
    """Pairwise cosine similarity between the rows of x1 and x2.

    When x2 is omitted, computes the self-similarity matrix of x1; eps guards
    against division by zero for all-zero rows."""
    if x2 is None:
        x2 = x1
    w1 = x1.norm(p=2, dim=1, keepdim=True)
    # Reuse x1's norms when scoring against itself.
    w2 = w1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
    return torch.mm(x1, x2.t()) / (w1 * w2.t()).clamp(min=eps)
Example #27
Source File: layers.py From DropEdge with MIT License | 5 votes |
def forward(self, input, adj):
    """Dense graph-convolution variant: linear transform, optional bias,
    batch norm, then activation. Note: adj is accepted but not used here."""
    out = torch.mm(input, self.weight)
    if self.bias is not None:
        out = out + self.bias
    return self.sigma(self.bn(out))
Example #28
Source File: model.py From ggnn.pytorch with MIT License | 5 votes |
def forward(self, x, E_start, E_end):
    """Two gated graph-convolution layers (edge-gated GNN) with batch norm and
    a residual connection. E_start / E_end are E x V incidence matrices that
    map each edge to its source / target vertex."""
    # E_start, E_end : E x V
    xin = x
    # conv1
    Vix = self.Vi1(x)  # V x H_out
    Vjx = self.Vj1(x)  # V x H_out
    # Edge gates built from both endpoints of each edge.
    x1 = torch.mm(E_end, Vix) + torch.mm(E_start, Vjx) + self.bv1  # E x H_out
    x1 = torch.sigmoid(x1)
    Ujx = self.Uj1(x)  # V x H_out
    x2 = torch.mm(E_start, Ujx)  # E x H_out (source features gathered per edge)
    Uix = self.Ui1(x)  # V x H_out
    # Scatter gated edge messages back to target vertices.
    x = Uix + torch.mm(E_end.t(), x1*x2) + self.bu1  # V x H_out
    # bn1
    x = self.bn1(x)
    # relu1
    x = F.relu(x)
    # conv2 (same pattern with the second parameter set)
    Vix = self.Vi2(x)  # V x H_out
    Vjx = self.Vj2(x)  # V x H_out
    x1 = torch.mm(E_end, Vix) + torch.mm(E_start, Vjx) + self.bv2  # E x H_out
    x1 = torch.sigmoid(x1)
    Ujx = self.Uj2(x)  # V x H_out
    x2 = torch.mm(E_start, Ujx)  # E x H_out
    Uix = self.Ui2(x)  # V x H_out
    x = Uix + torch.mm(E_end.t(), x1*x2) + self.bu2  # V x H_out
    # bn2
    x = self.bn2(x)
    # addition (residual; R maps the block input to the output width)
    x = x + self.R(xin)
    # relu2
    x = F.relu(x)
    return x


##############################
# Class NN definition
##############################
Example #29
Source File: model.py From LiteralE with Apache License 2.0 | 5 votes |
def forward(self, e1, rel):
    """ComplEx scoring with literal-augmented embeddings: real and imaginary
    entity embeddings are each fused with the entity's numeric attributes
    before the complex-space bilinear product against all candidates.
    Returns per-entity probabilities of shape (batch, num_entities).

    FIX: F.sigmoid is deprecated (removed in modern PyTorch) — use torch.sigmoid.
    """
    e1_emb_real = self.emb_e_real(e1).view(Config.batch_size, -1)
    rel_emb_real = self.emb_rel_real(rel).view(Config.batch_size, -1)
    e1_emb_img = self.emb_e_img(e1).view(Config.batch_size, -1)
    rel_emb_img = self.emb_rel_img(rel).view(Config.batch_size, -1)
    # Begin literals
    e1_num_lit = self.numerical_literals[e1.view(-1)]
    e1_emb_real = self.emb_num_lit_real(torch.cat([e1_emb_real, e1_num_lit], 1))
    e1_emb_img = self.emb_num_lit_img(torch.cat([e1_emb_img, e1_num_lit], 1))
    e2_multi_emb_real = self.emb_num_lit_real(torch.cat([self.emb_e_real.weight, self.numerical_literals], 1))
    e2_multi_emb_img = self.emb_num_lit_img(torch.cat([self.emb_e_img.weight, self.numerical_literals], 1))
    # End literals
    e1_emb_real = self.inp_drop(e1_emb_real)
    rel_emb_real = self.inp_drop(rel_emb_real)
    e1_emb_img = self.inp_drop(e1_emb_img)
    rel_emb_img = self.inp_drop(rel_emb_img)
    # Complex-space bilinear product (four real-valued matmuls).
    realrealreal = torch.mm(e1_emb_real * rel_emb_real, e2_multi_emb_real.t())
    realimgimg = torch.mm(e1_emb_real * rel_emb_img, e2_multi_emb_img.t())
    imgrealimg = torch.mm(e1_emb_img * rel_emb_real, e2_multi_emb_img.t())
    imgimgreal = torch.mm(e1_emb_img * rel_emb_img, e2_multi_emb_real.t())
    pred = realrealreal + realimgimg + imgrealimg - imgimgreal
    return torch.sigmoid(pred)
Example #30
Source File: model.py From LiteralE with Apache License 2.0 | 5 votes |
def forward(self, e1, rel):
    """KBLN-style scoring: a DistMult relational score plus an RBF similarity
    score over numerical literals, combined and squashed to probabilities.

    FIX: F.sigmoid is deprecated (removed in modern PyTorch) — use torch.sigmoid.
    """
    e1_emb = self.emb_e(e1).view(-1, self.emb_dim)
    rel_emb = self.emb_rel(rel).view(-1, self.emb_dim)
    e1_emb = self.inp_drop(e1_emb)
    rel_emb = self.inp_drop(rel_emb)
    # Relational (DistMult) score against every entity.
    score_l = torch.mm(e1_emb * rel_emb, self.emb_e.weight.t())
    # Begin numerical literals
    n_h = self.numerical_literals[e1.view(-1)]  # (batch_size x n_lit)
    n_t = self.numerical_literals  # (num_ents x n_lit)
    # Pairwise literal differences: (batch_size x num_ents x n_lit)
    n = n_h.unsqueeze(1).repeat(1, self.num_entities, 1) - n_t
    phi = self.rbf(n)
    # Relation-specific literal weights (batch_size, 1, n_lits)
    w_nf = self.nf_weights(rel)
    # (batch_size, num_ents); NOTE(review): .squeeze() also drops the batch
    # dimension when batch_size == 1 — confirm downstream code handles that.
    score_n = torch.bmm(phi, w_nf.transpose(1, 2)).squeeze()
    # End numerical literals
    return torch.sigmoid(score_l + score_n)