Python torch.LongTensor() Examples

The following are 30 code examples of torch.LongTensor(), collected from open-source projects; each example notes its source file, originating project, and license. You may also want to check out all available functions and classes of the torch module, or try the search function.
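As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of the two constructor forms that recur throughout: building a long tensor from a Python list, and preallocating an uninitialized buffer from a size.

import torch

idx = torch.LongTensor([1, 2, 3])   # copies the list; dtype is torch.int64
buf = torch.LongTensor(4)           # uninitialized 1-D buffer of length 4
print(idx.dtype)                    # torch.int64
print(buf.shape)                    # torch.Size([4])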
Example #1
Source File: test_sampler.py    From mmdetection with Apache License 2.0
def test_random_sampler_empty_pred():
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)

    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)

    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)

    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) 
Example #2
Source File: model.py    From VSE-C with MIT License
def forward(self, encoding, lengths):
        lengths = Variable(torch.LongTensor(lengths))
        if torch.cuda.is_available():
            lengths = lengths.cuda()
        if self.method == 'mean':
            encoding_pad = nn.utils.rnn.pack_padded_sequence(encoding, lengths.data.tolist(), batch_first=True)
            encoding = nn.utils.rnn.pad_packed_sequence(encoding_pad, batch_first=True, padding_value=0)[0]
            lengths = lengths.float().view(-1, 1)
            return encoding.sum(1) / lengths, None
        elif self.method == 'max':
            return encoding.max(1)  # [bsz, in_dim], [bsz, in_dim] (position)
        elif self.method == 'attn':
            size = encoding.size()  # [bsz, len, in_dim]
            x_flat = encoding.contiguous().view(-1, size[2])  # [bsz*len, in_dim]
            hbar = self.tanh(self.ws1(x_flat))  # [bsz*len, attn_hid]
            alphas = self.ws2(hbar).view(size[0], size[1])  # [bsz, len]
            alphas = nn.utils.rnn.pack_padded_sequence(alphas, lengths.data.tolist(), batch_first=True)
            alphas = nn.utils.rnn.pad_packed_sequence(alphas, batch_first=True, padding_value=-1e8)[0]
            alphas = functional.softmax(alphas, dim=1)  # [bsz, len]
            alphas = alphas.view(size[0], 1, size[1])  # [bsz, 1, len]
            return torch.bmm(alphas, encoding).squeeze(1), alphas  # [bsz, in_dim], [bsz, len]
        elif self.method == 'last':
            return torch.cat([encoding[i][lengths[i] - 1] for i in range(encoding.size(0))], dim=0), None 
Example #3
Source File: resnet18_image2imu_regress.py    From dogTorch with MIT License
def forward(self, input, target):
        features = self.feats(input)
        output_indices = list(range(target.size(1)))
        # Iterate over fully connecteds for each imu, perform forward pass and
        # record the output.
        imu_out = []
        for i in self.imus:
            imu_i = getattr(self, 'imu{}'.format(i))
            imu_out.append(imu_i(features))
        # Add a singleton dim at 1 for sequence length, which is always 1 in
        # this model.
        output = torch.stack(imu_out, dim=1).unsqueeze(1)
        output /= output.norm(2, 3, keepdim=True)
        return output, target, torch.LongTensor(output_indices) 
Example #4
Source File: conceptnet.py    From comet-commonsense with Apache License 2.0
def shuffle_sequences(self, split="train", keys=None):
        if keys is None:
            # print(type(self.data))
            # print(type(self.data.keys()))
            keys = self.data[split].keys()

        for key in keys:
            if key in ["positive", "negative"]:
                continue
            idxs = list(range(len(self.data[split][key])))

            random.shuffle(idxs)

            self.sequences[split][key] = \
                self.sequences[split][key].index_select(
                    0, torch.LongTensor(idxs))

            temp = [self.data[split][key][i] for i in idxs]
            self.data[split][key] = temp

            temp = [self.masks[split][key][i] for i in idxs]
            self.masks[split][key] = temp 
Example #5
Source File: DDPAE_utils.py    From DDPAE-video-prediction with MIT License
def pose_inv_full(pose):
  '''
  :param pose: N x 6
  Invert the 2x3 transformation matrix.
  '''
  N, _ = pose.size()
  b = pose.view(N, 2, 3)[:, :, 2:]
  # A^{-1}
  # Calculate determinant
  determinant = (pose[:, 0] * pose[:, 4] - pose[:, 1] * pose[:, 3] + 1e-8).view(N, 1)
  indices = Variable(torch.LongTensor([4, 1, 3, 0]).cuda())
  scale = Variable(torch.Tensor([1, -1, -1, 1]).cuda())
  A_inv = torch.index_select(pose, 1, indices) * scale / determinant
  A_inv = A_inv.view(N, 2, 2)
  # b' = - A^{-1} b
  b_inv = - A_inv.matmul(b).view(N, 2, 1)
  transformer_inv = torch.cat([A_inv, b_inv], dim=2)
  return transformer_inv 
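The index_select call is the heart of this inverse; here it is in isolation, with an illustrative pose row, showing how the LongTensor [4, 1, 3, 0] pulls out (d, b, c, a) from the flattened [a, b, tx, c, d, ty] layout:

import torch

pose = torch.tensor([[1., 2., 5., 3., 4., 6.]])   # one row: a b tx c d ty
indices = torch.LongTensor([4, 1, 3, 0])
print(torch.index_select(pose, 1, indices))       # tensor([[4., 2., 3., 1.]])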
Example #6
Source File: Data.py    From GST-Tacotron with MIT License
def get_aishell_data(data_dir, r):
    path = os.path.join(data_dir, 'transcript.txt')
    data_dir = os.path.join(data_dir, 'wav', 'train')
    wav_paths = []
    texts = []
    with open(path, 'r') as f:
        for line in f.readlines():
            items = line.strip().split('|')
            wav_paths.append(os.path.join(data_dir, items[0] + '.wav'))
            text = items[1]
            text = text_normalize(text) + 'E'
            text = [hp.char2idx[c] for c in text]
            text = torch.Tensor(text).type(torch.LongTensor)
            texts.append(text)

    for wav, txt in zip(wav_paths[-20:], texts[-20:]):
        print(wav, txt)

    return wav_paths[r], texts[r] 
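A small aside on the cast above: torch.Tensor(text).type(torch.LongTensor) round-trips the character ids through a float tensor. On PyTorch 0.4 and later, a direct construction gives the same result in one step (a sketch with illustrative ids):

import torch

text_ids = [3, 17, 42]                        # hypothetical hp.char2idx output
text = torch.tensor(text_ids, dtype=torch.long)
assert torch.equal(text, torch.Tensor(text_ids).type(torch.LongTensor))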
Example #7
Source File: Data.py    From GST-Tacotron with MIT License
def get_keda_data(dataset_dir, r):
    wav_paths = []
    texts = []

    wav_dirs = ['nannan', 'xiaofeng', 'donaldduck']
    csv_paths = ['transcript-nannan.csv', 'transcript-xiaofeng.csv', 'transcript-donaldduck.csv']
    for wav_dir, csv_path in zip(wav_dirs, csv_paths):
        csv = open(os.path.join(dataset_dir, csv_path), 'r')
        for line in csv.readlines():
            items = line.strip().split('|')
            wav_paths.append(os.path.join(dataset_dir, wav_dir, items[0] + '.wav'))
            text = text_normalize(items[1]) + 'E'
            text = [hp.char2idx[c] for c in text]
            text = torch.Tensor(text).type(torch.LongTensor)
            texts.append(text)
        csv.close()

    for wav in wav_paths[-20:]:
        print(wav)

    return wav_paths[r], texts[r] 
Example #8
Source File: Data.py    From GST-Tacotron with MIT License
def get_LJ_data(data_dir, r):
    path = os.path.join(data_dir, 'transcript.csv')
    data_dir = os.path.join(data_dir, 'wavs')
    wav_paths = []
    texts = []
    with open(path, 'r') as f:
        for line in f.readlines():
            items = line.strip().split('|')
            wav_paths.append(os.path.join(data_dir, items[0] + '.wav'))
            text = items[1]
            text = text_normalize(text) + 'E'
            text = [hp.char2idx[c] for c in text]
            text = torch.Tensor(text).type(torch.LongTensor)
            texts.append(text)

    for wav in wav_paths[-20:]:
        print(wav)

    return wav_paths[r], texts[r] 
Example #9
Source File: functions.py    From comet-commonsense with Apache License 2.0
def set_conceptnet_inputs(input_event, relation, text_encoder, max_e1, max_r, force):
    abort = False

    e1_tokens, rel_tokens, _ = data.conceptnet_data.do_example(text_encoder, input_event, relation, None)

    if len(e1_tokens) > max_e1:
        if force:
            XMB = torch.zeros(1, len(e1_tokens) + max_r).long().to(cfg.device)
        else:
            XMB = torch.zeros(1, max_e1 + max_r).long().to(cfg.device)
            return {}, True
    else:
        XMB = torch.zeros(1, max_e1 + max_r).long().to(cfg.device)

    XMB[:, :len(e1_tokens)] = torch.LongTensor(e1_tokens)
    XMB[:, max_e1:max_e1 + len(rel_tokens)] = torch.LongTensor(rel_tokens)

    batch = {}
    batch["sequences"] = XMB
    batch["attention_mask"] = data.conceptnet_data.make_attention_mask(XMB)

    return batch, abort 
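The buffer-filling pattern above, reduced to a runnable sketch with illustrative token ids and widths: token sequences of varying length are written into slices of a zero-initialized long tensor, so every batch row has a fixed layout.

import torch

max_e1, max_r = 5, 3
e1_tokens, rel_tokens = [7, 8], [2]
XMB = torch.zeros(1, max_e1 + max_r).long()
XMB[:, :len(e1_tokens)] = torch.LongTensor(e1_tokens)
XMB[:, max_e1:max_e1 + len(rel_tokens)] = torch.LongTensor(rel_tokens)
print(XMB)    # tensor([[7, 8, 0, 0, 0, 2, 0, 0]])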
Example #10
Source File: model_architecture.py    From models with MIT License
def forward(self, input):
        # array has shape (N, 4, 1, 1000)
        # return the sequence + its RC concatenated
        # create inverted indices
        invert_dims = [1,3]
        input_bkup = input
        for idim in invert_dims:
            idxs = [i for i in range(input.size(idim)-1, -1, -1)]
            idxs_var = Variable(torch.LongTensor(idxs))
            if input.is_cuda:
                idxs_var = idxs_var.cuda()
            input = input.index_select(idim, idxs_var)
        #
        input = torch.cat([input_bkup, input], dim=0)
        #
        # Using numpy:
        #input = edit_tensor_in_numpy(input, lambda x: np.concatenate([x, x[:,::-1, : ,::-1]],axis=0))
        return input 
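As a standalone check of the reversal trick above: descending LongTensor indices passed to index_select reverse a dimension, and on newer PyTorch versions torch.flip produces the same result (a sketch with an arbitrary input):

import torch

x = torch.randn(2, 4, 1, 6)
idxs = torch.LongTensor(list(range(x.size(1) - 1, -1, -1)))   # [3, 2, 1, 0]
assert torch.equal(x.index_select(1, idxs), torch.flip(x, dims=[1]))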
Example #11
Source File: test_assigner.py    From mmdetection with Apache License 2.0
def test_max_iou_assigner():
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4

    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds) 
Example #12
Source File: anchor_generator.py    From mmdetection with Apache License 2.0
def gen_base_anchors(self):
        """Generate base anchors.

        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_size in enumerate(self.base_sizes):
            base_anchors = self.gen_single_level_base_anchors(
                base_size,
                scales=self.scales[i],
                ratios=self.ratios[i],
                center=self.centers[i])
            indices = list(range(len(self.ratios[i])))
            indices.insert(1, len(indices))
            base_anchors = torch.index_select(base_anchors, 0,
                                              torch.LongTensor(indices))
            multi_level_base_anchors.append(base_anchors)
        return multi_level_base_anchors 
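The indices.insert(1, len(indices)) line is easy to misread; a minimal sketch with an illustrative 4-row tensor shows the resulting row order, where the inserted index moves the last row into second position:

import torch

indices = list(range(3))           # [0, 1, 2]
indices.insert(1, len(indices))    # [0, 3, 1, 2]
base = torch.arange(8).view(4, 2)
print(torch.index_select(base, 0, torch.LongTensor(indices)))
# rows come out in order 0, 3, 1, 2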
Example #13
Source File: test_assigner.py    From mmdetection with Apache License 2.0
def test_max_iou_assigner_with_ignore():
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [30, 32, 40, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    assign_result = self.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)

    expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
    assert torch.all(assign_result.gt_inds == expected_gt_inds) 
Example #14
Source File: Data.py    From GST-Tacotron with MIT License
def get_blizzard_data(data_dir, r):
    file_list = './filelists/bliz13_audio_text_train_filelist.txt'

    texts = []
    wav_paths = []
    with open(file_list, 'r') as f:
        for line in f.readlines():
            wav_path, text = line.strip().split('|')
            wav_paths.append(os.path.join(data_dir, wav_path))

            text = text_normalize(text) + 'E'
            text = [hp.char2idx[c] for c in text]
            text = torch.Tensor(text).type(torch.LongTensor)
            texts.append(text)

    for wav in wav_paths[-20:]:
        print(wav)

    return wav_paths[r], texts[r] 
Example #15
Source File: resnet_one_tower_baseline.py    From dogTorch with MIT License
def __init__(self, args):
        super(ResNet18Image2IMUOneTower, self).__init__()
        assert args.sequence_length == 1, "ResNet18Image2IMU supports seq-len=1"

        self.class_weights = args.dataset.CLASS_WEIGHTS[torch.LongTensor(
            args.imus)]

        resnet_model = torchvision_resnet18(pretrained=args.pretrain)
        # Remove the last fully connected layer.
        del resnet_model.fc
        self.resnet = resnet_model

        self.resnet.conv1 = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2),
                                      padding=(3, 3), bias=False)

        num_features = 512
        num_frames = 1
        num_classes = args.num_classes
        # Make num_imu fc layers
        self.imus = args.imus
        for i in self.imus:
            setattr(self, 'imu{}'.format(i),
                    nn.Linear(num_frames * num_features, num_classes))
        self.dropout = nn.Dropout() 
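The CLASS_WEIGHTS line uses LongTensor fancy indexing to gather one weight per selected IMU; in isolation (with illustrative values):

import torch

CLASS_WEIGHTS = torch.tensor([1.0, 0.5, 2.0, 1.5])
imus = torch.LongTensor([0, 2])
print(CLASS_WEIGHTS[imus])    # tensor([1., 2.])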
Example #16
Source File: test_assigner.py    From mmdetection with Apache License 2.0
def test_max_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([])
    assign_result = self.assign(bboxes, gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds) 
Example #17
Source File: test_assigner.py    From mmdetection with Apache License 2.0
def test_approx_iou_assigner():
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)

    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds) 
Example #18
Source File: test_assigner.py    From mmdetection with Apache License 2.0
def test_approx_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([])
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds) 
Example #19
Source File: model_architecture.py    From models with MIT License
def forward(self, input):
        # Swap ACGT to AGCT
        # array has shape (N, 4, 1, 1000)
        # pytorch doesn't support full indexing at the moment, at some point this should work: [:,:,torch.LongTensor([0,2,1,3])]
        input_reordered = [input[:,i,...] for i in [0,2,1,3]]
        input = torch.stack(input_reordered, dim=1)
        # slightly faster but ugly:
        #input = edit_tensor_in_numpy(input, lambda x: x[:,[0,2,1,3], ...])
        return input 
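The comment in this example predates full advanced indexing; on recent PyTorch versions the one-liner it anticipates works and matches the stack-based reordering (a quick check, not part of the original file):

import torch

x = torch.randn(2, 4, 1, 6)
reordered = x[:, torch.LongTensor([0, 2, 1, 3]), ...]
expected = torch.stack([x[:, i, ...] for i in [0, 2, 1, 3]], dim=1)
assert torch.equal(reordered, expected)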
Example #20
Source File: demo_grec_mpnn.py    From nmp_qc with MIT License
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg 
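The .type(torch.LongTensor) cast here is not cosmetic: nn.CrossEntropyLoss requires integer (long) class targets. A minimal sketch with illustrative shapes:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
output = torch.randn(4, 10)    # [batch, num_classes]
target = torch.squeeze(torch.Tensor([[1.], [0.], [3.], [2.]])).type(torch.LongTensor)
loss = criterion(output, target)    # fails if target stays float
print(loss.item())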
Example #21
Source File: voc2012.py    From Pytorch-Project-Template with MIT License
def __getitem__(self, index):
        if self.mode == 'test':
            img_path, img_name = self.imgs[index]
            img = Image.open(os.path.join(img_path, img_name + '.jpg')).convert('RGB')
            if self.transform is not None:
                img = self.transform(img)
            return img_name, img

        img_path, mask_path = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        if self.mode == 'train':
            mask = sio.loadmat(mask_path)['GTcls']['Segmentation'][0][0]
            mask = Image.fromarray(mask.astype(np.uint8))
        else:
            mask = Image.open(mask_path)

        if self.joint_transform is not None:
            img, mask = self.joint_transform(img, mask)

        if self.sliding_crop is not None:
            img_slices, mask_slices, slices_info = self.sliding_crop(img, mask)
            if self.transform is not None:
                img_slices = [self.transform(e) for e in img_slices]
            if self.target_transform is not None:
                mask_slices = [self.target_transform(e) for e in mask_slices]
            img, mask = torch.stack(img_slices, 0), torch.stack(mask_slices, 0)
            return img, mask, torch.LongTensor(slices_info)
        else:
            if self.transform is not None:
                img = self.transform(img)
            if self.target_transform is not None:
                mask = self.target_transform(mask)
            return img, mask 
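One detail worth noting in the sliding-crop branch: torch.LongTensor also accepts a nested list (here, one coordinate record per slice) and produces a 2-D tensor. With illustrative values:

import torch

slices_info = [[0, 0, 256, 256], [0, 128, 256, 384]]   # hypothetical crop records
print(torch.LongTensor(slices_info).shape)             # torch.Size([2, 4])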
Example #22
Source File: models_nearest_neighbor_planning.py    From dogTorch with MIT License
def forward(self, input, target):
        if self.training:
            raise RuntimeError("Can't do forward pass in training.")
        imu_start_index = 1  # inclusive
        imu_end_index = imu_start_index + self.planning_distance  # exclusive
        assert imu_start_index > 0 and imu_end_index < self.sequence_length
        input_start = input[:, :imu_start_index]
        input_end = input[:, imu_end_index:]
        target = target[:, imu_start_index:imu_end_index]
        output_indices = list(range(imu_start_index, imu_end_index))
        input = torch.cat([input_start, input_end],1)

        input = input.data.contiguous().view(input.size(0), -1)
        new_tensor = input.new
        min_distances = new_tensor(input.size(0)).fill_(1e9)
        best_labels = new_tensor(target.size()).long().fill_(-1)
        for i, (train_data, train_label, _, _) in enumerate(self.train_loader):
            train_data_start = train_data[:, :imu_start_index]
            train_data_end = train_data[:, imu_end_index:]
            train_data = torch.cat([train_data_start, train_data_end],1).contiguous()
            train_data = train_data.view(train_data.size(0), -1).cuda(non_blocking=True)  # 'async' is a keyword in Python 3.7+
            train_label = train_label[:, imu_start_index:imu_end_index].contiguous()
            train_label = train_label.cuda(non_blocking=True)

            distances = -torch.mm(input, train_data.t())
            cur_min_distances, min_indices = distances.min(1)
            min_indices = min_indices[:,None,None].expand_as(best_labels)
            cur_labels = train_label.gather(0, min_indices)
            old_new_distances = torch.stack([min_distances, cur_min_distances])
            min_distances, picker = old_new_distances.min(0)
            picker = picker[:,None,None].expand_as(best_labels)
            best_labels = (1 - picker) * best_labels + picker * cur_labels
        output = new_tensor(*best_labels.size(), self.num_classes).fill_(0)
        output.scatter_(3, best_labels.unsqueeze(3), 1)
        output = Variable(output)
        return output, target, torch.LongTensor(output_indices) 
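The closing scatter_ call converts long labels into a one-hot encoding; reduced to a standalone sketch with small illustrative shapes:

import torch

num_classes = 5
best_labels = torch.LongTensor([[2, 0], [4, 1]])        # [N, T]
output = torch.zeros(*best_labels.size(), num_classes)  # [N, T, C]
output.scatter_(2, best_labels.unsqueeze(2), 1)
print(output[0, 0])    # tensor([0., 0., 1., 0., 0.])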
Example #23
Source File: demo_letter_ggnn.py    From nmp_qc with MIT License
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg 
Example #24
Source File: demo_letter_duvenaud.py    From nmp_qc with MIT License
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg 
Example #25
Source File: resnet_one_tower_baseline.py    From dogTorch with MIT License
def forward(self, input, target):
        features = self.feats(input)
        output_indices = list(range(target.size(1)))
        # Iterate over fully connecteds for each imu, perform forward pass and
        # record the output.
        imu_out = []
        for i in self.imus:
            imu_i = getattr(self, 'imu{}'.format(i))
            imu_out.append(imu_i(features))
        # Add a singleton dim at 1 for sequence length, which is always 1 in
        # this model.
        return torch.stack(
            imu_out,
            dim=1).unsqueeze(1), target, torch.LongTensor(output_indices) 
Example #26
Source File: pth_nms.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def pth_nms(dets, thresh):
  """
  dets has to be a tensor
  """
  if not dets.is_cuda:
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.sort(0, descending=True)[1]
    # order = torch.from_numpy(np.ascontiguousarray(scores.numpy().argsort()[::-1])).long()

    keep = torch.LongTensor(dets.size(0))
    num_out = torch.LongTensor(1)
    nms.cpu_nms(keep, num_out, dets, order, areas, thresh)

    return keep[:num_out[0]]
  else:
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.sort(0, descending=True)[1]
    # order = torch.from_numpy(np.ascontiguousarray(scores.cpu().numpy().argsort()[::-1])).long().cuda()

    dets = dets[order].contiguous()

    keep = torch.LongTensor(dets.size(0))
    num_out = torch.LongTensor(1)
    # keep = torch.cuda.LongTensor(dets.size(0))
    # num_out = torch.cuda.LongTensor(1)
    nms.gpu_nms(keep, num_out, dets, thresh)

    return order[keep[:num_out[0]].cuda()].contiguous()
    # return order[keep[:num_out[0]]].contiguous() 
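The uninitialized-buffer pattern in this example, shown in isolation: the C/CUDA extension fills keep in place, writes the count of valid entries into num_out, and the Python side slices. A sketch that fakes the kernel's writes with illustrative values:

import torch

keep = torch.LongTensor(5)       # uninitialized output buffer
num_out = torch.LongTensor(1)
num_out[0] = 3                   # pretend the kernel kept 3 boxes
keep[:3] = torch.LongTensor([0, 2, 4])
print(keep[:num_out[0]])         # tensor([0, 2, 4])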
Example #27
Source File: demo_gwhist_ggnn.py    From nmp_qc with MIT License
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg 
Example #28
Source File: lstm_action_planning.py    From dogTorch with MIT License
def __init__(self, args):
        super(LstmImg2ActionPlanning, self).__init__()

        assert args.planning_distance == args.output_length

        self.input_length = args.input_length
        self.output_length = args.output_length
        self.sequence_length = args.sequence_length
        self.class_weights = args.dataset.CLASS_WEIGHTS[torch.LongTensor(
            args.imus)]
        self.planning_distance = args.planning_distance
        self.imus = args.imus
        self.hidden_size = args.hidden_size
        self.num_classes = args.num_classes

        self.embedding_input = nn.Linear(args.image_feature * 2,
                                         args.hidden_size)
        self.embedding_target = nn.Linear(args.num_classes * len(args.imus),
                                          args.hidden_size)
        self.lstm = nn.LSTMCell(args.hidden_size, args.hidden_size,
                                args.num_layers)
        self.out = nn.Linear(args.hidden_size, args.imu_feature)

        for i in self.imus:
            setattr(self, 'imu{}'.format(i),
                    nn.Linear(args.imu_feature, args.num_classes)) 
Example #29
Source File: demo_grec_intnet.py    From nmp_qc with MIT License
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg 
Example #30
Source File: models_nearest_neighbor_planning.py    From dogTorch with MIT License
def __init__(self, args):
        super(NearestNeighborPlanning, self).__init__()
        self.class_weights = args.dataset.CLASS_WEIGHTS[torch.LongTensor(
            args.imus)]
        self.planning_distance = args.planning_distance
        self.sequence_length = args.sequence_length
        self.train_loader = args.train_loader
        self.num_classes = args.num_classes