Python torch.nn.functional.cosine_similarity() Examples

The following are 30 code examples of torch.nn.functional.cosine_similarity(), drawn from open-source projects. Each example notes its source file, originating project, and license. You may also want to check out the other functions and classes available in the torch.nn.functional module.
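
As a reminder of the basic API: F.cosine_similarity(x1, x2, dim=1, eps=1e-8) returns the cosine of the angle between x1 and x2 computed along dim, broadcasting any remaining dimensions; eps keeps the norm product away from zero. A minimal, self-contained sketch:

import torch
import torch.nn.functional as F

a = torch.randn(4, 128)  # a batch of 4 vectors
b = torch.randn(4, 128)

sim = F.cosine_similarity(a, b, dim=1)  # one value per row, in [-1, 1]
print(sim.shape)  # torch.Size([4])
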
Example #1
Source File: base_layer.py    From torch-light with MIT License
def forward(self, repres, max_att):
        """
        Args:
            repres - [bsz, a_len|q_len, cont_dim]
            max_att - [bsz, q_len|a_len, cont_dim]
        Return:
            size - [bsz, sentence_len, mp_dim]
        """
        bsz = repres.size(0)
        sent_len = repres.size(1)

        repres = repres.view(-1, self.cont_dim)
        max_att = max_att.view(-1, self.cont_dim)
        repres = multi_perspective_expand_for_2D(repres, self.weight)
        max_att = multi_perspective_expand_for_2D(max_att, self.weight)
        temp = cosine_similarity(repres, max_att, repres.dim()-1)

        return temp.view(bsz, sent_len, self.mp_dim) 
Example #2
Source File: LeakGAN_G.py    From TextGAN-PyTorch with MIT License
def worker_cos_reward(self, feature_array, goal_array):
        """
        Get reward for worker (cosine distance)

        :return: cos_loss: batch_size * seq_len
        """
        for i in range(int(self.max_seq_len / self.step_size)):
            real_feature = feature_array[:, i * self.step_size, :].unsqueeze(1).expand((-1, self.step_size, -1))
            feature_array[:, i * self.step_size:(i + 1) * self.step_size, :] = real_feature
            if i > 0:
                sum_goal = torch.sum(goal_array[:, (i - 1) * self.step_size:i * self.step_size, :], dim=1, keepdim=True)
            else:
                sum_goal = goal_array[:, 0, :].unsqueeze(1)
            goal_array[:, i * self.step_size:(i + 1) * self.step_size, :] = sum_goal.expand((-1, self.step_size, -1))

        offset_feature = feature_array[:, 1:, :]  # f_{t+1}, batch_size * seq_len * goal_out_size
        goal_array = goal_array[:, :self.max_seq_len, :]  # batch_size * seq_len * goal_out_size
        sub_feature = offset_feature - goal_array

        # L2 normalization
        sub_feature = F.normalize(sub_feature, p=2, dim=-1)
        all_goal = F.normalize(goal_array, p=2, dim=-1)

        cos_loss = F.cosine_similarity(sub_feature, all_goal, dim=-1)  # batch_size * seq_len
        return cos_loss 
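
Incidentally, F.cosine_similarity normalizes its inputs internally, so the explicit F.normalize calls above do not change the value of cos_loss (they may still aid readability). A quick check:

import torch
import torch.nn.functional as F

x, y = torch.randn(2, 5, 8), torch.randn(2, 5, 8)
raw = F.cosine_similarity(x, y, dim=-1)
normed = F.cosine_similarity(F.normalize(x, p=2, dim=-1),
                             F.normalize(y, p=2, dim=-1), dim=-1)
print(torch.allclose(raw, normed, atol=1e-6))  # True
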
Example #3
Source File: model.py    From castor with Apache License 2.0
def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a, sent1_block_b, sent2_block_b):
        comparison_feats = []
        ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
        for pool in ('max', 'min', 'mean'):
            for ws1 in self.filter_widths:
                x1 = sent1_block_a[ws1][pool]
                for ws2 in self.filter_widths:
                    x2 = sent2_block_a[ws2][pool]
                    if (not np.isinf(ws1) and not np.isinf(ws2)) or (np.isinf(ws1) and np.isinf(ws2)):
                        comparison_feats.append(F.cosine_similarity(x1, x2).unsqueeze(1))
                        comparison_feats.append(F.pairwise_distance(x1, x2).unsqueeze(1))
                        comparison_feats.append(torch.abs(x1 - x2))

        for pool in ('max', 'min'):
            for ws in ws_no_inf:
                oG_1B = sent1_block_b[ws][pool]
                oG_2B = sent2_block_b[ws][pool]
                for i in range(0, self.n_per_dim_filters):
                    x1 = oG_1B[:, :, i]
                    x2 = oG_2B[:, :, i]
                    comparison_feats.append(F.cosine_similarity(x1, x2).unsqueeze(1))
                    comparison_feats.append(F.pairwise_distance(x1, x2).unsqueeze(1))
                    comparison_feats.append(torch.abs(x1 - x2))

        return torch.cat(comparison_feats, dim=1) 
Example #4
Source File: bimpm.py    From sentence-similarity with MIT License
def matching_strategy_full(self, v1, v2, W):
        """
        :param v1: batch x seq_len x n_hidden
        :param v2: batch x n_hidden (FULL) or batch x seq_len x n_hidden (ATTENTIVE)
        :param W:  l x n_hidden
        :return: batch x seq_len x l
        """
        l = W.size(0)
        batch_size = v1.size(0)
        seq_len = v1.size(1)

        v1 = v1.unsqueeze(2).expand(-1, -1, l, -1)          # batch x seq_len x l x n_hidden
        W_expanded = W.expand(batch_size, seq_len, -1, -1)  # batch x seq_len x l x n_hidden
        Wv1 = W_expanded.mul(v1)                            # batch x seq_len x l x n_hidden

        if len(v2.size()) == 2:
            v2 = v2.unsqueeze(1).unsqueeze(1).expand(-1, seq_len, l, -1)  # batch x seq_len x l x n_hidden
        elif len(v2.size()) == 3:
            v2 = v2.unsqueeze(2).expand(-1, -1, l, -1)  # batch x seq_len x l x n_hidden
        else:
            raise ValueError(f'Invalid v2 tensor size {v2.size()}')
        Wv2 = W_expanded.mul(v2)

        cos_sim = F.cosine_similarity(Wv1, Wv2, dim=3)
        return cos_sim 
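
Here dim=3 reduces over the hidden dimension, leaving one similarity per matching perspective. A quick shape check with hypothetical sizes (not from the original file):

import torch
import torch.nn.functional as F

batch, seq_len, l, n_hidden = 2, 3, 4, 5
Wv1 = torch.randn(batch, seq_len, l, n_hidden)
Wv2 = torch.randn(batch, seq_len, l, n_hidden)
print(F.cosine_similarity(Wv1, Wv2, dim=3).shape)  # torch.Size([2, 3, 4])
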
Example #5
Source File: network_utils.py    From 3d-vehicle-tracking with BSD 3-Clause "New" or "Revised" License
def linear_motion_loss(outputs, mask):
    #batch_size = outputs.shape[0]
    s_len = outputs.shape[1]

    loss = outputs.new_zeros(1)
    for idx in range(2, s_len, 1):
        # mask loss to valid outputs
        # motion_mask: (B, 1), the mask of current frame
        motion_mask = mask[:, idx].view(mask.shape[0], 1)

        # Loss: |(loc_t - loc_t-1), (loc_t-1, loc_t-2)|_1 for t = [2, s_len]
        # If loc_t is empty, mask it out by motion_mask
        curr_motion = (outputs[:, idx] - outputs[:, idx - 1]) * motion_mask
        past_motion = (outputs[:, idx - 1] - outputs[:, idx - 2]) * motion_mask
        loss += torch.mean(1.0 - F.cosine_similarity(past_motion, curr_motion))
        loss += F.l1_loss(past_motion, curr_motion)
    return loss / (torch.sum(mask)) 
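
As a sanity check, a constant-velocity trajectory incurs zero loss: consecutive motion vectors coincide, so both the cosine term and the L1 term vanish. A toy example with hypothetical values:

import torch
import torch.nn.functional as F

outputs = torch.tensor([[[0., 0.], [1., 1.], [2., 2.]]])  # B=1, 3 steps, constant velocity
past = outputs[:, 1] - outputs[:, 0]
curr = outputs[:, 2] - outputs[:, 1]
print(torch.mean(1.0 - F.cosine_similarity(past, curr)))  # tensor(0.)
print(F.l1_loss(past, curr))                              # tensor(0.)
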
Example #6
Source File: helper.py    From backdoor_federated_learning with MIT License
def model_cosine_similarity(self, model, target_params_variables,
                                model_id='attacker'):

        cs_list = list()
        cs_loss = torch.nn.CosineSimilarity(dim=0)
        for name, data in model.named_parameters():
            if name == 'decoder.weight':
                continue

            model_update = 100*(data.view(-1) - target_params_variables[name].view(-1)) + target_params_variables[name].view(-1)


            cs = F.cosine_similarity(model_update,
                                     target_params_variables[name].view(-1), dim=0)
            # logger.info(torch.equal(layer.view(-1),
            #                          target_params_variables[name].view(-1)))
            # logger.info(name)
            # logger.info(cs.data[0])
            # logger.info(torch.norm(model_update).data[0])
            # logger.info(torch.norm(fake_weights[name]))
            cs_list.append(cs)
        cos_los_submit = 1*(1-sum(cs_list)/len(cs_list))
        logger.info(model_id)
        logger.info((sum(cs_list)/len(cs_list)).data[0])
        return 1e3*cos_los_submit  # already a 0-dim tensor; wrapping it in sum() would raise a TypeError
Example #7
Source File: basic_model.py    From IRNet with MIT License
def embedding_cosine(self, src_embedding, table_embedding, table_unk_mask):
        embedding_differ = []
        for i in range(table_embedding.size(1)):
            one_table_embedding = table_embedding[:, i, :]
            one_table_embedding = one_table_embedding.unsqueeze(1).expand(table_embedding.size(0),
                                                                          src_embedding.size(1),
                                                                          table_embedding.size(2))

            topk_val = F.cosine_similarity(one_table_embedding, src_embedding, dim=-1)

            embedding_differ.append(topk_val)
        embedding_differ = torch.stack(embedding_differ).transpose(1, 0)
        embedding_differ.data.masked_fill_(table_unk_mask.unsqueeze(2).expand(
            table_embedding.size(0),
            table_embedding.size(1),
            embedding_differ.size(2)
        ).bool(), 0)

        return embedding_differ 
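
The per-column loop above can be collapsed into one broadcasted call. The following equivalent form is a sketch of our own (the helper name is ours, and masking is left to the caller as in the loop version), not part of the original basic_model.py:

import torch.nn.functional as F

def embedding_cosine_broadcast(src_embedding, table_embedding):
    # src_embedding: (B, S, D), table_embedding: (B, T, D) -> (B, T, S)
    return F.cosine_similarity(table_embedding.unsqueeze(2),
                               src_embedding.unsqueeze(1), dim=-1)
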
Example #8
Source File: actor_observer_wrapper.py    From PyVideoResearch with GNU General Public License v3.0
def base(self, x, y, z):
        #base_y = self.basenet(y)
        #if random.random() > .5:  # TODO Debug, make sure order doesn't matter
        #    base_x = self.basenet(x)
        #    base_z = self.basenet(z)
        #else:
        #    base_z = self.basenet(z)
        #    base_x = self.basenet(x)
        base_x = self.basenet(x)
        base_y = self.basenet(y)
        base_z = self.basenet(z)

        if self.distance == 'cosine':
            dist_a = .5 - .5 * F.cosine_similarity(base_x, base_y, 1, 1e-6).view(-1)
            dist_b = .5 - .5 * F.cosine_similarity(base_y, base_z, 1, 1e-6).view(-1)
        elif self.distance == 'l2':
            dist_a = F.pairwise_distance(base_x, base_y, 2).view(-1)
            dist_b = F.pairwise_distance(base_y, base_z, 2).view(-1)
        else:
            assert False, "Wrong args.distance"

        print('fc7 norms:', base_x.norm().item(), base_y.norm().item(), base_z.norm().item())
        print('pairwise dist means:', dist_a.mean().item(), dist_b.mean().item())
        return base_x, base_y, base_z, dist_a, dist_b 
Example #9
Source File: model.py    From KitcheNette with Apache License 2.0
def forward(self, d1_r, d1_c, d1_l, d2_r, d2_c, d2_l):
        siamese_embed1 = self.model_siamese(d1_r)
        siamese_embed2 = self.model_siamese(d2_r)
        outputs = [siamese_embed1, siamese_embed2]

        if self.dist_fn == "concat":
            output_concat = self.model_concat(siamese_embed1, siamese_embed2)
            output = output_concat
            outputs.append(output)

        elif self.dist_fn == "cos":
            output_cos = F.cosine_similarity(siamese_embed1 + 1e-16, siamese_embed2 + 1e-16, dim=-1)
            output = output_cos
            outputs.append(output)

        elif self.dist_fn == "widedeep":
            output_widedeep = self.model_widedeep(siamese_embed1, siamese_embed2, d1_c, d2_c)
            output = output_widedeep
            outputs.append(output)

        return outputs 
Example #10
Source File: jointemb.py    From deep-code-search with MIT License
def similarity(self, code_vec, desc_vec):
        """
        https://arxiv.org/pdf/1508.01585.pdf 
        """
        assert self.conf['sim_measure'] in ['cos', 'poly', 'euc', 'sigmoid', 'gesd', 'aesd'], "invalid similarity measure"
        if self.conf['sim_measure']=='cos':
            return F.cosine_similarity(code_vec, desc_vec)
        elif self.conf['sim_measure']=='poly':
            return (0.5*torch.matmul(code_vec, desc_vec.t()).diag()+1)**2
        elif self.conf['sim_measure']=='sigmoid':
            return torch.tanh(torch.matmul(code_vec, desc_vec.t()).diag()+1)
        elif self.conf['sim_measure'] in ['euc', 'gesd', 'aesd']:
            euc_dist = torch.dist(code_vec, desc_vec, 2) # or torch.norm(code_vec-desc_vec,2)
            euc_sim = 1 / (1 + euc_dist)
            if self.conf['sim_measure']=='euc': return euc_sim                
            sigmoid_sim = torch.sigmoid(torch.matmul(code_vec, desc_vec.t()).diag()+1)
            if self.conf['sim_measure']=='gesd': 
                return euc_sim * sigmoid_sim
            elif self.conf['sim_measure']=='aesd':
                return 0.5*(euc_sim+sigmoid_sim) 
Example #11
Source File: verification.py    From SpeakerRecognition_tutorial with MIT License
def perform_verification(use_cuda, model, embeddings, enroll_speaker, test_filename, test_frames, thres):
    enroll_embedding = embeddings[enroll_speaker]
    test_embedding = get_embeddings(use_cuda, test_filename, model, test_frames)

    score = F.cosine_similarity(test_embedding, enroll_embedding)
    score = score.data.cpu().numpy() 
        
    if score > thres:
        result = 'Accept'
    else:
        result = 'Reject'
        
    test_spk = test_filename.split('/')[-2].split('_')[0]
    print("\n=== Speaker verification ===")
    print("True speaker: %s\nClaimed speaker : %s\n\nResult : %s\n" %(enroll_speaker, test_spk, result))
    print("Score : %0.4f\nThreshold : %0.2f\n" %(score, thres)) 
Example #12
Source File: pytorch_clusters.py    From pytorch_active_learning with MIT License
def cosine_similary(self, item):
        text = item[1]
        words = text.split()  
        
        vector = [0] * len(self.feature_vector)
        for word in words:
            if word not in self.feature_idx:
                self.feature_idx[word] = len(self.feature_vector)
                self.feature_vector.append(0)
                vector.append(1)
            else:
                while len(vector) <= self.feature_idx[word]:
                    vector.append(0)
                    self.feature_vector.append(0)
                              
                vector[self.feature_idx[word]] += 1
        
        item_tensor = torch.FloatTensor(vector)
        cluster_tensor = torch.FloatTensor(self.feature_vector)
        
        similarity = F.cosine_similarity(item_tensor, cluster_tensor, 0)
        
        # Alternatively, use F.pairwise_distance(), but normalize the cluster vector first
        
        return similarity.item() # item() converts tensor value to float 
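
Note the explicit dim=0: both tensors here are 1-D, so the default dim=1 would be out of range. A minimal illustration:

import torch
import torch.nn.functional as F

v1 = torch.tensor([1., 0., 1.])
v2 = torch.tensor([1., 1., 0.])
print(F.cosine_similarity(v1, v2, dim=0))  # tensor(0.5000)
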
Example #13
Source File: lite_model.py    From castor with Apache License 2.0
def _algo_1_horiz_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        regM1, regM2 = [], []
        for ws in self.filter_widths:
            x1 = sent1_block_a[ws]['max'].unsqueeze(2)
            x2 = sent2_block_a[ws]['max'].unsqueeze(2)
            if np.isinf(ws):
                x1 = x1.expand(-1, self.n_holistic_filters, -1)
                x2 = x2.expand(-1, self.n_holistic_filters, -1)
            regM1.append(x1)
            regM2.append(x2)

        regM1 = torch.cat(regM1, dim=2)
        regM2 = torch.cat(regM2, dim=2)

        # Cosine similarity
        comparison_feats.append(F.cosine_similarity(regM1, regM2, dim=2))
        # Euclidean distance
        pairwise_distances = []
        for x1, x2 in zip(regM1, regM2):
            dist = F.pairwise_distance(x1, x2).view(1, -1)
            pairwise_distances.append(dist)
        comparison_feats.append(torch.cat(pairwise_distances))

        return torch.cat(comparison_feats, dim=1) 
Example #14
Source File: mpcnn.py    From sentence-similarity with MIT License
def _algo_1_horiz_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        for pool in ('max', 'min', 'mean'):
            for ws in self.filter_widths:
                x1 = sent1_block_a[ws][pool]
                x2 = sent2_block_a[ws][pool]
                batch_size = x1.size()[0]
                comparison_feats.append(F.cosine_similarity(x1, x2).contiguous().view(batch_size, 1))
                comparison_feats.append(F.pairwise_distance(x1, x2))
        return torch.cat(comparison_feats, dim=1) 
Example #15
Source File: mpcnn_lite.py    From sentence-similarity with MIT License
def _algo_1_horiz_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        for pool in ('max', ):
            for ws in self.filter_widths:
                x1 = sent1_block_a[ws][pool]
                x2 = sent2_block_a[ws][pool]
                batch_size = x1.size()[0]
                comparison_feats.append(F.cosine_similarity(x1, x2).contiguous().view(batch_size, 1))
                comparison_feats.append(F.pairwise_distance(x1, x2))
        return torch.cat(comparison_feats, dim=1) 
Example #16
Source File: mpcnn.py    From sentence-similarity with MIT License
def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a, sent1_block_b, sent2_block_b):
        comparison_feats = []
        ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
        for pool in ('max', 'min', 'mean'):
            for ws1 in self.filter_widths:
                x1 = sent1_block_a[ws1][pool]
                batch_size = x1.size()[0]
                for ws2 in self.filter_widths:
                    x2 = sent2_block_a[ws2][pool]
                    if (not np.isinf(ws1) and not np.isinf(ws2)) or (np.isinf(ws1) and np.isinf(ws2)):
                        comparison_feats.append(F.cosine_similarity(x1, x2).contiguous().view(batch_size, 1))
                        comparison_feats.append(F.pairwise_distance(x1, x2))
                        comparison_feats.append(torch.abs(x1 - x2))

        for pool in ('max', 'min'):
            for ws in ws_no_inf:
                oG_1B = sent1_block_b[ws][pool]
                oG_2B = sent2_block_b[ws][pool]
                for i in range(0, self.n_per_dim_filters):
                    x1 = oG_1B[:, :, i]
                    x2 = oG_2B[:, :, i]
                    batch_size = x1.size()[0]
                    comparison_feats.append(F.cosine_similarity(x1, x2).contiguous().view(batch_size, 1))
                    comparison_feats.append(F.pairwise_distance(x1, x2))
                    comparison_feats.append(torch.abs(x1 - x2))

        return torch.cat(comparison_feats, dim=1) 
Example #17
Source File: esm.py    From context_attentive_ir with MIT License
def forward(self, batch_queries, query_len, batch_docs, doc_len):
        """
        Forward function of the model. Returns a similarity score for each query-document pair.
        :param batch_queries: 2d tensor [batch_size x max_query_length]
        :param query_len: 1d numpy array [batch_size]
        :param batch_docs: 3d tensor [batch_size x num_rel_docs_per_query x max_document_length]
        :param doc_len: 2d numpy array [batch_size x num_clicks_per_query]
        :return: softmax score representing click probability [batch_size x num_rel_docs_per_query]
        """
        assert batch_queries.shape[0] == batch_docs.shape[0]
        batch_size = batch_queries.shape[0]
        qlen = batch_queries.shape[1]
        num_docs, dlen = batch_docs.shape[1], batch_docs.shape[2]

        # embed query
        embedded_queries = self.word_embeddings(batch_queries.unsqueeze(2))
        embedded_queries = embedded_queries.mean(1)  # averaging

        # embed document
        doc_rep = batch_docs.view(batch_size * num_docs, dlen).unsqueeze(2)
        embedded_docs = self.word_embeddings(doc_rep)
        embedded_docs = embedded_docs.mean(1)  # averaging
        doc_rep = embedded_docs.view(batch_size, num_docs, -1)

        query_rep = embedded_queries.unsqueeze(1).expand(*doc_rep.size())
        scores = f.cosine_similarity(query_rep, doc_rep, dim=2)
        return scores 
Example #18
Source File: utils.py    From PyTorch_Speaker_Verification with BSD 3-Clause "New" or "Revised" License
def get_cossim_prior(embeddings, centroids):
    # Calculates cosine similarity matrix. Requires (N, M, feature) input
    cossim = torch.zeros(embeddings.size(0),embeddings.size(1),centroids.size(0))
    for speaker_num, speaker in enumerate(embeddings):
        for utterance_num, utterance in enumerate(speaker):
            for centroid_num, centroid in enumerate(centroids):
                if speaker_num == centroid_num:
                    centroid = get_centroid(embeddings, speaker_num, utterance_num)
                output = F.cosine_similarity(utterance,centroid,dim=0)+1e-6
                cossim[speaker_num][utterance_num][centroid_num] = output
    return cossim 
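
The triple loop is easy to read but slow for large N and M. Under the same (N, M, feature) layout, the off-diagonal entries can be computed in one broadcasted call; this sketch is our own vectorization (it omits the per-utterance centroid correction applied when speaker_num == centroid_num), not part of the original utils.py:

import torch
import torch.nn.functional as F

def get_cossim_broadcast(embeddings, centroids, eps=1e-6):
    # embeddings: (N, M, D), centroids: (N, D) -> (N, M, N)
    e = embeddings.unsqueeze(2)               # (N, M, 1, D)
    c = centroids.unsqueeze(0).unsqueeze(0)   # (1, 1, N, D)
    return F.cosine_similarity(e, c, dim=-1) + eps
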
Example #19
Source File: loss_phase.py    From onssen with GNU General Public License v3.0
def loss_phase(output, label):
    assert len(output) == 5, "There must be 5 tensors in the output"
    assert len(label) == 6, "There must be 6 tensors in the label"
    [embedding, mask_A, mask_B, phase_A, phase_B] = output
    [one_hot_label, mag_mix, mag_s1, mag_s2, phase_s1, phase_s2] = label
    batch_size, time_size, frequency_size = mag_mix.size()
    # compute the loss of embedding part
    loss_embedding = loss_dc([embedding, mag_mix], [one_hot_label])

    #compute the loss of mask part
    loss_mask1 = norm_1d(mask_A*mag_mix - mag_s1)\
               + norm_1d(mask_B*mag_mix - mag_s2)
    loss_mask2 = norm_1d(mask_B*mag_mix - mag_s1)\
               + norm_1d(mask_A*mag_mix - mag_s2)

    amin = loss_mask1<loss_mask2
    loss_mask = torch.zeros_like(loss_mask1)
    loss_mask[amin] = loss_mask1[amin]
    loss_mask[~amin] = loss_mask2[~amin]

    loss_phase1 = -mag_mix * F.cosine_similarity(phase_A, phase_s1, dim=3)\
                  -mag_mix * F.cosine_similarity(phase_B, phase_s2, dim=3)
    loss_phase2 = -mag_mix * F.cosine_similarity(phase_B, phase_s1, dim=3)\
                  -mag_mix * F.cosine_similarity(phase_A, phase_s2, dim=3)

    loss_phase1 = torch.sum(loss_phase1.reshape(batch_size,-1),dim=1)
    loss_phase2 = torch.sum(loss_phase2.reshape(batch_size,-1),dim=1)
    loss_phase = torch.zeros_like(loss_phase1)
    loss_phase[amin] = loss_phase1[amin]
    loss_phase[~amin] = loss_phase2[~amin]

    return loss_embedding*0.975 + loss_mask*0.025 + loss_phase*0.025 
Example #20
Source File: zil.py    From incremental_learning.pytorch with MIT License
def forward_gmmn(self, visual_features, semantic_features, class_id, words, metrics):
        loss = mmd(real=visual_features, fake=semantic_features, **self.gmmn_config["mmd"])

        if self.gmmn_config.get("old_mmd") and self._old_word_embeddings is not None:
            old_unseen_limit = self._n_classes - self._task_size

            if not self.gmmn_config["old_mmd"].get(
                "apply_unseen", False
            ) and class_id >= old_unseen_limit:
                return loss
            with torch.no_grad():
                old_semantic_features = self._old_word_embeddings(words)

            factor = self.gmmn_config["old_mmd"]["factor"]
            _type = self.gmmn_config["old_mmd"].get("type", "mmd")
            if _type == "mmd":
                old_loss = factor * mmd(
                    real=old_semantic_features, fake=semantic_features, **self.gmmn_config["mmd"]
                )
            elif _type == "kl":
                old_loss = factor * F.kl_div(
                    semantic_features, old_semantic_features, reduction="batchmean"
                )
            elif _type == "l2":
                old_loss = factor * torch.pairwise_distance(
                    semantic_features, old_semantic_features, p=2
                ).mean()
            elif _type == "cosine":
                old_loss = factor * (
                    1 - torch.cosine_similarity(semantic_features, old_semantic_features)
                ).mean()
            else:
                raise ValueError(f"Unknown distillation: {_type}.")

            if self.gmmn_config.get("scheduled"):
                old_loss = old_loss * math.sqrt(self._n_classes / self._task_size)

            metrics["old"] += old_loss.item()
            return loss + old_loss
        return loss 
Example #21
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_cosine_similarity(self):
        inp1 = torch.randn(1024, 128, device='cuda', dtype=self.dtype)
        inp2 = torch.randn(1024, 128, device='cuda', dtype=self.dtype)
        output = F.cosine_similarity(inp1, inp2, dim=1, eps=1e-8) 
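
The eps argument (default 1e-8) clamps the norm product away from zero, so an all-zero input yields a similarity of 0 rather than NaN:

import torch
import torch.nn.functional as F

zero = torch.zeros(1, 4)
ones = torch.ones(1, 4)
print(F.cosine_similarity(zero, ones, dim=1))  # tensor([0.])
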
Example #22
Source File: cdssm.py    From context_attentive_ir with MIT License
def forward(self, batch_queries, query_len, batch_docs, doc_len):
        """
        Forward function of the model. Returns a similarity score for each query-document pair.
        :param batch_queries: 2d tensor [batch_size x max_query_length]
        :param query_len: 1d numpy array [batch_size]
        :param batch_docs: 3d tensor [batch_size x num_rel_docs_per_query x max_document_length]
        :param doc_len: 2d numpy array [batch_size x num_clicks_per_query]
        :return: softmax score representing click probability [batch_size x num_rel_docs_per_query]
        """
        assert batch_queries.shape[0] == batch_docs.shape[0]
        batch_size = batch_queries.shape[0]
        qlen = batch_queries.shape[1]
        num_docs, dlen = batch_docs.shape[1], batch_docs.shape[2]

        # query encoding
        embedded_queries = self.word_embeddings(batch_queries.unsqueeze(2))
        embedded_queries = self.emb_drop(embedded_queries)  # b,s,h
        embedded_queries = self._interleave_tensor(embedded_queries)  # b,s-2,3h
        query_rep = self.query_conv(embedded_queries.transpose(1, 2)).transpose(1, 2)
        query_rep = f.tanh(self.query_sem(f.tanh(query_rep)))
        latent_query_rep = query_rep.max(1)[0]  # max-pooling

        # document encoding
        doc_rep = batch_docs.view(batch_size * num_docs, dlen).unsqueeze(2)
        embedded_docs = self.word_embeddings(doc_rep)
        embedded_docs = self.emb_drop(embedded_docs)  # b,s,h
        embedded_docs = self._interleave_tensor(embedded_docs)  # b,s-2,3h
        doc_rep = self.doc_conv(embedded_docs.transpose(1, 2)).transpose(1, 2)
        doc_rep = f.tanh(self.doc_sem(f.tanh(doc_rep)))
        latent_doc_rep = doc_rep.max(1)[0]  # max-pooling
        latent_doc_rep = latent_doc_rep.view(batch_size, num_docs, -1)

        # compute loss
        latent_query_rep = latent_query_rep.unsqueeze(1).expand(*latent_doc_rep.size())
        scores = f.cosine_similarity(latent_query_rep, latent_doc_rep, dim=2)
        return scores 
Example #23
Source File: mpcnn_lite.py    From sentence-similarity with MIT License
def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
        for pool in ('max', ):
            for ws1 in self.filter_widths:
                x1 = sent1_block_a[ws1][pool]
                batch_size = x1.size()[0]
                for ws2 in self.filter_widths:
                    x2 = sent2_block_a[ws2][pool]
                    if (not np.isinf(ws1) and not np.isinf(ws2)) or (np.isinf(ws1) and np.isinf(ws2)):
                        comparison_feats.append(F.cosine_similarity(x1, x2).contiguous().view(batch_size, 1))
                        comparison_feats.append(F.pairwise_distance(x1, x2))
                        comparison_feats.append(torch.abs(x1 - x2))

        return torch.cat(comparison_feats, dim=1) 
Example #24
Source File: metanet.py    From FewRel with MIT License
def attention_score(self, s_att, q_att):
        '''
        s_att: (B, N, K, D)
        q_att: (B, NQ, D)
        '''
        s_att = s_att.view(s_att.size(0), s_att.size(1) * s_att.size(2), s_att.size(3)) # (B, N * K, D)
        s_att = s_att.unsqueeze(1) # (B, 1, N * K, D)
        q_att = q_att.unsqueeze(2) # (B, NQ, 1, D)
        cos = F.cosine_similarity(s_att, q_att, dim=-1) # (B, NQ, N * K)
        score = F.softmax(cos, -1) # (B, NQ, N * K)
        return score 
Example #25
Source File: model.py    From castor with Apache License 2.0
def _algo_1_horiz_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        for pool in ('max', 'min', 'mean'):
            regM1, regM2 = [], []
            for ws in self.filter_widths:
                x1 = sent1_block_a[ws][pool].unsqueeze(2)
                x2 = sent2_block_a[ws][pool].unsqueeze(2)
                if np.isinf(ws):
                    x1 = x1.expand(-1, self.n_holistic_filters, -1)
                    x2 = x2.expand(-1, self.n_holistic_filters, -1)
                regM1.append(x1)
                regM2.append(x2)

            regM1 = torch.cat(regM1, dim=2)
            regM2 = torch.cat(regM2, dim=2)

            # Cosine similarity
            comparison_feats.append(F.cosine_similarity(regM1, regM2, dim=2))
            # Euclidean distance
            pairwise_distances = []
            for x1, x2 in zip(regM1, regM2):
                dist = F.pairwise_distance(x1, x2).view(1, -1)
                pairwise_distances.append(dist)
            comparison_feats.append(torch.cat(pairwise_distances))

        return torch.cat(comparison_feats, dim=1) 
Example #26
Source File: lite_model.py    From castor with Apache License 2.0
def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
        for ws1 in self.filter_widths:
            x1 = sent1_block_a[ws1]['max']
            for ws2 in self.filter_widths:
                x2 = sent2_block_a[ws2]['max']
                if (not np.isinf(ws1) and not np.isinf(ws2)) or (np.isinf(ws1) and np.isinf(ws2)):
                    comparison_feats.append(F.cosine_similarity(x1, x2).unsqueeze(1))
                    comparison_feats.append(F.pairwise_distance(x1, x2).unsqueeze(1))
                    comparison_feats.append(torch.abs(x1 - x2))

        return torch.cat(comparison_feats, dim=1) 
Example #27
Source File: dagmm.py    From pyodds with MIT License
def forward(self, x):
        dec, enc = self.autoencoder(x, return_latent=True)

        rec_cosine = F.cosine_similarity(x.view(x.shape[0], -1), dec.view(dec.shape[0], -1), dim=1)
        rec_euclidean = self.relative_euclidean_distance(x.view(x.shape[0], -1), dec.view(dec.shape[0], -1), dim=1)

        # Concatenate latent representation, cosine similarity and relative Euclidean distance between x and dec(enc(x))
        z = torch.cat([enc, rec_euclidean.unsqueeze(-1), rec_cosine.unsqueeze(-1)], dim=1)
        gamma = self.estimation(z)

        return enc, dec, z, gamma 
Example #28
Source File: helper.py    From backdoor_federated_learning with MIT License
def cos_sim_loss(self, model, target_vec):
        model_vec = self.get_one_vec(model, variable=True)
        target_var = Variable(target_vec, requires_grad=False)
        # target_vec.requires_grad = False
        cs_sim = torch.nn.functional.cosine_similarity(self.params['scale_weights']*(model_vec-target_var) + target_var, target_var, dim=0)
        # cs_sim = cs_loss(model_vec, target_vec)
        logger.info("los")
        logger.info( cs_sim.data[0])
        logger.info(torch.norm(model_vec - target_var).data[0])
        loss = 1-cs_sim

        return 1e3*loss 
Example #29
Source File: CalculateDistance.py    From NeuronBlocks with MIT License
def forward(self, x, x_len, y, y_len):
        """

        Args:
            x: [batch_size, dim]
            x_len: [batch_size]
            y: [batch_size, dim]
            y_len: [batch_size]
        Returns:
            Tensor: [batch_size, 1], None

        """

        batch_size = x.size()[0]
        if "cos" in self.layer_conf.operations:
            result = F.cosine_similarity(x, y)
        elif "euclidean" in self.layer_conf.operations:
            result = torch.sqrt(torch.sum((x-y)**2, dim=1))
        elif "manhattan" in self.layer_conf.operations:
            result = torch.sum(torch.abs((x - y)), dim=1)
        elif "chebyshev" in self.layer_conf.operations:
            result = torch.abs(x - y).max(dim=1)[0]  # max(dim=1) returns (values, indices); keep the values
        else:
            raise ConfigurationError("This operation is not supported!")

        result = result.view(batch_size, 1)
        return result, None 
Example #30
Source File: zil.py    From incremental_learning.pytorch with MIT License
def semantic_regularization(
    features, targets, similarity_matrix, margin=None, aggreg="mean", factor=1.0, metric="cosine"
):
    pair_indexes = []

    np_targets = targets.cpu().numpy()

    for index, target in enumerate(np_targets):
        neg_indexes = np.where(np_targets != target)[0]
        neg_index = np.random.choice(neg_indexes)
        pair_indexes.append(tuple(sorted((index, neg_index))))

    pair_indexes_ = list(set(pair_indexes))
    pair_indexes = torch.tensor(pair_indexes_).long()

    left = features[pair_indexes[..., 0]]
    right = features[pair_indexes[..., 1]]
    if metric == "cosine":
        similarities = F.cosine_similarity(left, right)

        if margin is not None:
            margins = torch.ones_like(similarities) * margin
        else:
            margins = similarity_matrix[targets[pair_indexes[..., 0]], targets[pair_indexes[...,
                                                                                            1]]]

        hinges = torch.clamp(similarities - margins, min=0.)

        return factor * _aggreg(hinges, aggreg, features_dim=features.shape[1])
    elif metric == "gor":
        similarities = torch.sum(torch.mul(left, right), 1)
        return factor * _aggreg(similarities, aggreg, features_dim=features.shape[1])
    elif metric == "snr":
        noise = left - right
        var_noise = noise.var(axis=1, unbiased=True)
        var_anchor = right.var(axis=1, unbiased=True)

        dist = torch.mean(var_anchor / var_noise)
        return factor * dist
    else:
        raise NotImplementedError(f"Unknown metric: {metric}.")