Python torch.sort() Examples
The following are 30 code examples of torch.sort().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module torch, or try the search function.
Example #1
Source File: modules.py From BAMnet with Apache License 2.0 | 7 votes |
def forward(self, x, x_len, atten_mask):
    """Self co-attention over each sequence followed by a BiLSTM, with
    length-sorting for packed sequences and restoration of batch order.

    Args:
        x: batch of sequence embeddings — assumes shape (batch, seq, dim); TODO confirm.
        x_len: per-sequence lengths, shape (batch,).
        atten_mask: 1/0 mask over sequence positions — presumably (batch, seq); verify against caller.

    Returns:
        Final BiLSTM hidden state per sequence, in the original batch order.
    """
    # Pairwise token affinity within each sequence.
    CoAtt = torch.bmm(x, x.transpose(1, 2))
    # Drive masked positions toward -INF so softmax gives them ~0 weight.
    CoAtt = atten_mask.unsqueeze(1) * CoAtt - (1 - atten_mask).unsqueeze(1) * INF
    CoAtt = torch.softmax(CoAtt, dim=-1)
    # Concatenate attended context with the raw embeddings.
    new_x = torch.cat([torch.bmm(CoAtt, x), x], -1)
    # pack_padded_sequence requires the batch sorted by decreasing length.
    sorted_x_len, indx = torch.sort(x_len, 0, descending=True)
    new_x = pack_padded_sequence(new_x[indx], sorted_x_len.data.tolist(), batch_first=True)
    # 2 = num_directions of the bidirectional LSTM; hidden size split across directions.
    h0 = to_cuda(torch.zeros(2, x_len.size(0), self.hidden_size // 2), self.use_cuda)
    c0 = to_cuda(torch.zeros(2, x_len.size(0), self.hidden_size // 2), self.use_cuda)
    packed_h, (packed_h_t, _) = self.model(new_x, (h0, c0))
    # restore the sorting
    _, inverse_indx = torch.sort(indx, 0)
    # Concatenate the final states of both directions into one vector.
    packed_h_t = torch.cat([packed_h_t[i] for i in range(packed_h_t.size(0))], -1)
    restore_packed_h_t = packed_h_t[inverse_indx]
    output = restore_packed_h_t
    return output
Example #2
Source File: lovasz.py From argus-tgs-salt with MIT License | 6 votes |
def lovasz_softmax_flat(probas, labels, only_present=False):
    """Multi-class Lovasz-Softmax loss.

    probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
    labels: [P] Tensor, ground truth labels (between 0 and C - 1)
    only_present: average only on classes present in ground truth
    """
    num_classes = probas.size(1)
    class_losses = []
    for cls in range(num_classes):
        # Binary foreground mask for this class.
        fg = (labels == cls).float()
        if only_present and fg.sum() == 0:
            continue
        class_errors = (Variable(fg) - probas[:, cls]).abs()
        errors_sorted, perm = torch.sort(class_errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        class_losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(class_losses)
Example #3
Source File: sparse_activations.py From ITDD with MIT License | 6 votes |
def _threshold_and_support(input, dim=0):
    """Sparsemax building block: compute the threshold.

    Parameters:
        input: any dimension
        dim: dimension along which to apply the sparsemax

    Returns:
        the threshold value tau and the support size
    """
    # Sort descending along the sparsemax dimension.
    sorted_vals, _ = torch.sort(input, descending=True, dim=dim)
    cumulative = sorted_vals.cumsum(dim) - 1
    positions = _make_ix_like(input, dim)
    # An entry is in the support while k * z_(k) > cumsum(z) - 1.
    in_support = positions * sorted_vals > cumulative
    support_size = in_support.sum(dim=dim).unsqueeze(dim)
    tau = cumulative.gather(dim, support_size - 1)
    tau /= support_size.to(input.dtype)
    return tau, support_size
Example #4
Source File: utils.py From pretorched-x with MIT License | 6 votes |
def average_precision(output, target, difficult_examples=True):
    """Compute average precision for a single class.

    Args:
        output: 1-D tensor of prediction scores.
        target: 1-D tensor of labels; 1 marks a positive, 0 marks a
            "difficult" example (skipped when ``difficult_examples``),
            anything else is a negative.
        difficult_examples: when True, entries with label 0 are ignored.

    Returns:
        Average precision as a float; 0.0 when there are no positive
        labels (the original divided by zero in that case). The original
        also bound the unused sorted values to a name shadowing the
        ``sorted`` builtin; that variable is dropped here.
    """
    # Rank all examples by descending score.
    _, ranked = torch.sort(output, dim=0, descending=True)
    # Computes prec@i
    pos_count = 0.
    total_count = 0.
    precision_at_i = 0.
    for i in ranked:
        label = target[i]
        if difficult_examples and label == 0:
            continue
        if label == 1:
            pos_count += 1
        total_count += 1
        if label == 1:
            # Precision@i only contributes at ranks holding a positive.
            precision_at_i += pos_count / total_count
    # Guard: no positives -> AP defined as 0 instead of ZeroDivisionError.
    if pos_count == 0:
        return 0.
    precision_at_i /= pos_count
    return precision_at_i
Example #5
Source File: losses.py From robosat with MIT License | 6 votes |
def forward(self, inputs, targets):
    """Lovasz hinge loss over a batch of dense predictions.

    inputs: (N, C, H, W) per-class scores; targets: (N, H, W) integer labels.
    Returns the mean per-image loss.
    """
    n, c, h, w = inputs.size()
    # One-hot encode targets along the channel dimension.
    one_hot = torch.zeros(n, c, h, w).to(targets.device).scatter_(1, targets.view(n, 1, h, w), 1)

    total = 0.
    for gt, scores in zip(one_hot.view(n, -1), inputs.view(n, -1)):
        # Hinge errors against {-1, +1}-encoded ground truth.
        hinge = 1. - ((gt * 2 - 1) * scores)
        hinge_sorted, order = torch.sort(hinge, descending=True)
        gt_sorted = gt[order.data]
        # Gradient of the Jaccard index along the sorted errors.
        pos = gt_sorted.sum()
        inter = pos - gt_sorted.cumsum(0)
        union = pos + (1. - gt_sorted).cumsum(0)
        jaccard = 1. - inter / union
        count = len(gt_sorted)
        if count > 1:
            jaccard[1:count] = jaccard[1:count] - jaccard[0:-1]
        total += torch.dot(nn.functional.relu(hinge_sorted), jaccard)
    return total / n
Example #6
Source File: test_websocket_worker.py From PySyft with Apache License 2.0 | 6 votes |
def test_websocket_worker_multiple_output_response(hook, start_remote_worker):
    """Evaluates that you can do basic tensor operations using WebsocketServerWorker."""
    # Spin up a server/client pair via the fixture.
    server, remote_proxy = start_remote_worker(id="socket_multiple_output", hook=hook, port=8771)
    x = torch.tensor([1.0, 3, 2])
    x = x.send(remote_proxy)
    # Remote torch.sort returns pointers to both outputs (values, indices).
    p1, p2 = torch.sort(x)
    x1, x2 = p1.get(), p2.get()
    assert (x1 == torch.tensor([1.0, 2, 3])).all()
    assert (x2 == torch.tensor([0, 2, 1])).all()
    x.get()  # retrieve remote object before closing the websocket connection
    remote_proxy.close()
    server.terminate()
Example #7
Source File: utils.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def cxcy_to_gcxgcy(cxcy, priors_cxcy):
    """Encode center-size bounding boxes w.r.t. their prior boxes.

    Centers become offsets scaled by the prior size; sizes become
    log-space ratios of box size to prior size. This is the encoded
    form the model predicts.

    :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_priors, 4)
    :param priors_cxcy: prior boxes to encode against, a tensor of size (n_priors, 4)
    :return: encoded bounding boxes, a tensor of size (n_priors, 4)
    """
    # The 10 and 5 below are the empirical 'variances' from the original
    # Caffe repo, used for 'scaling the localization gradient'.
    # See https://github.com/weiliu89/caffe/issues/155
    g_cxcy = (cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10)  # g_c_x, g_c_y
    g_wh = torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5  # g_w, g_h
    return torch.cat([g_cxcy, g_wh], 1)
Example #8
Source File: utils.py From pretorched-x with MIT License | 6 votes |
def value(self):
    """Returns the model's average precision for each class.

    Return:
        ap (FloatTensor): 1xK tensor, with avg precision for each class k
        (or 0 when no scores have been accumulated yet).
    """
    if self.scores.numel() == 0:
        return 0
    ap = torch.zeros(self.scores.size(1))
    # NOTE: the original also built `rg = torch.arange(1, size(0)).float()`
    # which was never used; that dead local is removed here.
    # compute average precision for each class
    for k in range(self.scores.size(1)):
        # sort scores
        scores = self.scores[:, k]
        targets = self.targets[:, k]
        # compute average precision
        ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)
    return ap
Example #9
Source File: utils.py From Attentional-PointNet with GNU General Public License v3.0 | 6 votes |
def nms(boxes, nms_thresh):
    """Greedy non-maximum suppression.

    boxes: sequence of detections where index 4 holds the confidence.
    Boxes whose IoU with an already-kept box exceeds nms_thresh are
    suppressed by zeroing their confidence in place.
    """
    if len(boxes) == 0:
        return boxes
    # Sorting ascending by (1 - conf) == descending by confidence.
    det_confs = torch.zeros(len(boxes))
    for i in range(len(boxes)):
        det_confs[i] = 1 - boxes[i][4]
    _, order = torch.sort(det_confs)
    kept = []
    for i in range(len(boxes)):
        candidate = boxes[order[i]]
        if candidate[4] > 0:
            kept.append(candidate)
            for j in range(i + 1, len(boxes)):
                other = boxes[order[j]]
                if bbox_iou(candidate, other, x1y1x2y2=False) > nms_thresh:
                    # Suppress: zero the confidence so later passes skip it.
                    other[4] = 0
    return kept
Example #10
Source File: utils.py From OpenKiwi with GNU Affero General Public License v3.0 | 6 votes |
def apply_packed_sequence(rnn, embedding, lengths):
    """Run a forward pass of embeddings through an RNN using packed sequences.

    Args:
        rnn: The RNN to compute a forward pass with.
        embedding (FloatTensor b x seq x dim): A batch of sequence embeddings.
        lengths (LongTensor batch): The length of each sequence in the batch.

    Returns:
        output: RNN outputs and the (hidden, cell) state, all restored to
        the caller's original batch order.
    """
    # Packed sequences require the batch sorted by decreasing length.
    sorted_lengths, sort_order = torch.sort(lengths, descending=True)
    packed_input = pack(embedding[sort_order], sorted_lengths, batch_first=True)
    packed_output, (hidden, cell) = rnn(packed_input)
    output_sorted, _ = unpack(packed_output, batch_first=True)
    # Invert the length-sort permutation.
    _, unsort_order = torch.sort(sort_order, descending=False)
    outputs = output_sorted[unsort_order]
    hidden, cell = hidden[:, unsort_order], cell[:, unsort_order]
    return outputs, (hidden, cell)
Example #11
Source File: lovasz_losses.py From PolarSeg with BSD 3-Clause "New" or "Revised" License | 6 votes |
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels: return a zero that stays connected to the graph
        return logits.sum() * 0.
    # Map {0, 1} labels to {-1, +1} and compute hinge margins.
    target_signs = 2. * labels.float() - 1.
    hinge_errors = (1. - logits * Variable(target_signs))
    sorted_errors, order = torch.sort(hinge_errors, dim=0, descending=True)
    order = order.data
    sorted_gt = labels[order]
    jaccard_grad = lovasz_grad(sorted_gt)
    return torch.dot(F.relu(sorted_errors), Variable(jaccard_grad))
Example #12
Source File: lovasz_hinge_loss.py From Parsing-R-CNN with MIT License | 6 votes |
def lovasz_hinge_flat(self, logits, labels):
    """Binary Lovasz hinge loss.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    # Void-only input: gradients should be exactly zero.
    if len(labels) == 0:
        return logits.sum() * 0.
    sign_targets = 2. * labels.float() - 1.
    margins = (1. - logits * Variable(sign_targets))
    margins_desc, ranking = torch.sort(margins, dim=0, descending=True)
    ranking = ranking.data
    truth_desc = labels[ranking]
    weights = lovasz_grad(truth_desc)
    return torch.dot(F.relu(margins_desc), Variable(weights))
Example #13
Source File: metrics.py From visdial-challenge-starter-pytorch with BSD 3-Clause "New" or "Revised" License | 6 votes |
def scores_to_ranks(scores: torch.Tensor):
    """Convert model output scores into ranks (1 = best).

    Args:
        scores: tensor of shape (batch_size, num_rounds, num_options).

    Returns:
        LongTensor of the same shape where entry [i, j, k] is the 1-based
        rank of option k according to its score.
    """
    batch_size, num_rounds, num_options = scores.size()
    scores = scores.view(-1, num_options)

    # sort in descending order - largest score gets highest rank
    sorted_ranks, ranked_idx = scores.sort(1, descending=True)

    # ranked_idx[i][j] is the option occupying rank j; invert that
    # permutation with one scatter instead of the original O(rows * options)
    # nested Python loops.
    ranks = torch.zeros_like(ranked_idx)
    rank_values = torch.arange(num_options, device=scores.device).repeat(ranked_idx.size(0), 1)
    ranks.scatter_(1, ranked_idx, rank_values)

    # convert from 0-based ranks to 1-based ranks
    ranks += 1
    return ranks.view(batch_size, num_rounds, num_options)
Example #14
Source File: activations.py From entmax with MIT License | 6 votes |
def __init__(self, dim=-1, k=None):
    """sparsemax: normalizing sparse transform (a la softmax).

    Solves the projection:
        min_p ||x - p||_2    s.t.    p >= 0, sum(p) == 1.

    Parameters
    ----------
    dim : int
        The dimension along which to apply sparsemax.
    k : int or None
        number of largest elements to partial-sort over. For optimal
        performance, should be slightly bigger than the expected number of
        nonzeros in the solution. If the solution is more than k-sparse,
        this function is recursively called with a 2*k schedule.
        If `None`, full sorting is performed from the beginning.
    """
    self.dim = dim
    self.k = k
    super(Sparsemax, self).__init__()
Example #15
Source File: Losses.py From pneumothorax-segmentation with MIT License | 6 votes |
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    # Encode labels as +/-1 and compute hinge margins.
    polarity = 2. * labels.float() - 1.
    margins = (1. - logits * Variable(polarity))
    margins_desc, perm_idx = torch.sort(margins, dim=0, descending=True)
    perm_idx = perm_idx.data
    labels_desc = labels[perm_idx]
    jaccard_grad = lovasz_grad(labels_desc)
    return torch.dot(F.relu(margins_desc), Variable(jaccard_grad))
Example #16
Source File: losses.py From centerpose with MIT License | 6 votes |
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss (smooth ELU-weighted variant).

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    sign = 2. * labels.float() - 1.
    err = (1. - logits * Variable(sign))
    err_sorted, idx = torch.sort(err, dim=0, descending=True)
    idx = idx.data
    gt_sorted = labels[idx]
    grad = lovasz_grad(gt_sorted)
    # ELU(x) + 1 serves as a smooth stand-in for ReLU here.
    return torch.dot(F.elu(err_sorted) + 1, Variable(grad))
Example #17
Source File: lovasz_losses.py From ext_portrait_segmentation with MIT License | 6 votes |
def lovasz_hinge_flat(self, logits, labels):
    """Binary Lovasz hinge loss.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    # Void-only batch: emit a zero that remains in the autograd graph.
    if len(labels) == 0:
        return logits.sum() * 0.
    plus_minus = 2. * labels.float() - 1.
    hinge = (1. - logits * Variable(plus_minus))
    hinge_desc, permutation = torch.sort(hinge, dim=0, descending=True)
    permutation = permutation.data
    truth_desc = labels[permutation]
    grad_weights = lovasz_grad(truth_desc)
    return torch.dot(F.relu(hinge_desc), Variable(grad_weights))
Example #18
Source File: utils.py From dgl with Apache License 2.0 | 6 votes |
def perturb_s_and_get_filtered_rank(embedding, w, s, r, o, test_size, triplets_to_filter):
    """ Perturb subject in the triplets """
    num_entities = embedding.shape[0]
    ranks = []
    for idx in range(test_size):
        if idx % 100 == 0:
            print("test triplet {} / {}".format(idx, test_size))
        target_s, target_r, target_o = s[idx], r[idx], o[idx]
        # Candidate subjects, with all other known-true triplets filtered out.
        filtered_s = filter_s(triplets_to_filter, target_s, target_r, target_o, num_entities)
        target_s_idx = int((filtered_s == target_s).nonzero())
        # DistMult score: elementwise product summed over the embedding dim.
        emb_triplet = embedding[filtered_s] * w[target_r] * embedding[target_o]
        scores = torch.sigmoid(torch.sum(emb_triplet, dim=1))
        _, indices = torch.sort(scores, descending=True)
        ranks.append(int((indices == target_s_idx).nonzero()))
    return torch.LongTensor(ranks)
Example #19
Source File: utils.py From dgl with Apache License 2.0 | 6 votes |
def perturb_o_and_get_filtered_rank(embedding, w, s, r, o, test_size, triplets_to_filter):
    """ Perturb object in the triplets """
    num_entities = embedding.shape[0]
    ranks = []
    for idx in range(test_size):
        if idx % 100 == 0:
            print("test triplet {} / {}".format(idx, test_size))
        target_s, target_r, target_o = s[idx], r[idx], o[idx]
        # Candidate objects, with all other known-true triplets filtered out.
        filtered_o = filter_o(triplets_to_filter, target_s, target_r, target_o, num_entities)
        target_o_idx = int((filtered_o == target_o).nonzero())
        # DistMult score: elementwise product summed over the embedding dim.
        emb_triplet = embedding[target_s] * w[target_r] * embedding[filtered_o]
        scores = torch.sigmoid(torch.sum(emb_triplet, dim=1))
        _, indices = torch.sort(scores, descending=True)
        ranks.append(int((indices == target_o_idx).nonzero()))
    return torch.LongTensor(ranks)
Example #20
Source File: gdc.py From pytorch_geometric with MIT License | 6 votes |
def __calculate_eps__(self, matrix, num_nodes, avg_degree): r"""Calculates threshold necessary to achieve a given average degree. Args: matrix (Tensor): Adjacency matrix or edge weights. num_nodes (int): Number of nodes. avg_degree (int): Target average degree. :rtype: (:class:`float`) """ sorted_edges = torch.sort(matrix.flatten(), descending=True).values if avg_degree * num_nodes > len(sorted_edges): return -np.inf left = sorted_edges[avg_degree * num_nodes - 1] right = sorted_edges[avg_degree * num_nodes] return (left + right) / 2.0
Example #21
Source File: lovasz_losses.py From open-solution-salt-identification with MIT License | 6 votes |
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss (ELU-weighted, no autograd wrapping).

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    pm_labels = 2. * labels.float() - 1.
    hinge_err = (1. - logits * pm_labels)
    err_desc, sort_idx = torch.sort(hinge_err, dim=0, descending=True)
    sort_idx = sort_idx.data
    gt_desc = labels[sort_idx]
    grad = lovasz_grad(gt_desc)
    # NOTE: this project's variant uses plain F.elu (no +1 offset).
    return torch.dot(F.elu(err_desc), grad)
Example #22
Source File: lovasz_losses.py From open-solution-salt-identification with MIT License | 6 votes |
def lovasz_softmax_flat(probas, labels, only_present=False):
    """Multi-class Lovasz-Softmax loss.

    probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
    labels: [P] Tensor, ground truth labels (between 0 and C - 1)
    only_present: average only on classes present in ground truth
    """
    n_classes = probas.size(1)
    per_class = []
    for class_id in range(n_classes):
        foreground = (labels == class_id).float()
        # Optionally skip classes absent from the ground truth.
        if only_present and foreground.sum() == 0:
            continue
        abs_errors = (foreground - probas[:, class_id]).abs()
        err_desc, order = torch.sort(abs_errors, 0, descending=True)
        order = order.data
        fg_desc = foreground[order]
        per_class.append(torch.dot(err_desc, lovasz_grad(fg_desc)))
    return mean(per_class)
Example #23
Source File: lovash_losses.py From open-solution-salt-identification with MIT License | 6 votes |
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # degenerate case: only void pixels, gradient must be zero
        return logits.sum() * 0.
    encoded = 2. * labels.float() - 1.
    violations = (1. - logits * Variable(encoded))
    violations_sorted, positions = torch.sort(violations, dim=0, descending=True)
    positions = positions.data
    sorted_truth = labels[positions]
    lovasz_weights = lovasz_grad(sorted_truth)
    return torch.dot(F.relu(violations_sorted), Variable(lovasz_weights))
Example #24
Source File: lovash_losses.py From open-solution-salt-identification with MIT License | 6 votes |
def lovasz_softmax_flat(probas, labels, only_present=False):
    """Multi-class Lovasz-Softmax loss.

    probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
    labels: [P] Tensor, ground truth labels (between 0 and C - 1)
    only_present: average only on classes present in ground truth
    """
    total_classes = probas.size(1)
    collected = []
    for label_value in range(total_classes):
        gt_mask = (labels == label_value).float()  # foreground for this class
        if only_present and gt_mask.sum() == 0:
            continue
        deviation = (gt_mask - probas[:, label_value]).abs()
        deviation_sorted, sort_perm = torch.sort(deviation, 0, descending=True)
        sort_perm = sort_perm.data
        mask_sorted = gt_mask[sort_perm]
        collected.append(torch.dot(deviation_sorted, lovasz_grad(mask_sorted)))
    return mean(collected)
Example #25
Source File: lovasz_losses.py From SegmenTron with Apache License 2.0 | 6 votes |
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels — emit a graph-connected zero
        return logits.sum() * 0.
    binary_signs = 2. * labels.float() - 1.
    raw_errors = (1. - logits * Variable(binary_signs))
    desc_errors, desc_perm = torch.sort(raw_errors, dim=0, descending=True)
    desc_perm = desc_perm.data
    desc_labels = labels[desc_perm]
    iou_grad = lovasz_grad(desc_labels)
    return torch.dot(F.relu(desc_errors), Variable(iou_grad))
Example #26
Source File: lovasz.py From argus-tgs-salt with MIT License | 6 votes |
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss using a smooth ELU(x)+1 error weighting.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    label_signs = 2. * labels.float() - 1.
    error_vec = (1. - logits * Variable(label_signs))
    error_desc, error_perm = torch.sort(error_vec, dim=0, descending=True)
    error_perm = error_perm.data
    gt_desc = labels[error_perm]
    gradient = lovasz_grad(gt_desc)
    return torch.dot(F.elu(error_desc) + 1, Variable(gradient))
Example #27
Source File: tracking_utils.py From pytorch-detect-to-track with MIT License | 6 votes |
def _keep_top_k(self, boxes, box_inds, end_at, top_k, thresh): '''Set dynamic class threshold based on average detections per class per frame (before nms) and keep boxes above this threshold. ''' box_list = boxes[:end_at].tolist() box_list = [box for box in box_list if box is not None] if len(box_list)==0: return X = torch.cat(box_list, dim=0) if X.size(0) == 0: return scores,_ = torch.sort(X[:,4], descending=True) # set threshold for this class thresh = scores[min(scores.numel(), top_k)] for image_index in range(end_at): if boxes[image_index] is not None and boxes[image_index].size(0)>0: bbox = boxes[image_index] keep = torch.nonzero(bbox[:,4]>=thresh).view(-1) if keep.numel()==0: continue boxes[image_index] = bbox[keep] box_inds[image_index] = box_inds[image_index][keep] return boxes, box_inds, thresh
Example #28
Source File: beam_search.py From meshed-memory-transformer with BSD 3-Clause "New" or "Revised" License | 5 votes |
def select(self, t, candidate_logprob, **kwargs):
    """Pick the beam_size best expansions from the candidate log-probs.

    Returns (selected_idx, selected_logprob), each of shape
    (b_s, beam_size), ordered by decreasing log-probability.
    """
    flat = candidate_logprob.view(self.b_s, -1)
    sorted_logprob, sorted_idx = torch.sort(flat, -1, descending=True)
    top_logprob = sorted_logprob[:, :self.beam_size]
    top_idx = sorted_idx[:, :self.beam_size]
    return top_idx, top_logprob
Example #29
Source File: tensor.py From dgl with Apache License 2.0 | 5 votes |
def sort_1d(input):
    """Sort a 1-D tensor in ascending order.

    Returns the (sorted_values, original_indices) pair from ``th.sort``.
    """
    return th.sort(input)
Example #30
Source File: utils.py From dgl with Apache License 2.0 | 5 votes |
def sort_and_rank(score, target):
    """For each row of ``score``, return the 0-based rank of the ``target``
    column when that row's scores are sorted in descending order."""
    _, order = torch.sort(score, dim=1, descending=True)
    # Locate where each row's target column landed after sorting.
    hits = torch.nonzero(order == target.view(-1, 1))
    return hits[:, 1].view(-1)