Python torch.IntTensor() Examples

The following are 29 code examples of torch.IntTensor(), collected from open-source projects. The originating project, source file, and license are listed above each example. You may also want to check out all other available functions and classes of the module torch.
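Before the project code, a minimal sketch of the two construction styles that recur in the examples below: torch.IntTensor(data) copies Python data into a 1-D int32 tensor, while torch.IntTensor(n) allocates an uninitialized tensor of n int32 values. Newer code would usually write torch.tensor(..., dtype=torch.int32) instead.

import torch

lengths = torch.IntTensor([7, 3, 5, 2])  # from data: tensor([7, 3, 5, 2], dtype=torch.int32)
buf = torch.IntTensor(4)                 # from a size: 4 uninitialized int32 values
buf.fill_(0)                             # in-place fill, a common follow-up below
modern = torch.tensor([7, 3, 5, 2], dtype=torch.int32)  # current idiomatic equivalent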
Example #1
Source File: test_treenet.py    From treenet with GNU General Public License v3.0
def __init__(self, *args, **kwargs):
        super(TestTreeNetInterpreterUnit, self).__init__(*args, **kwargs)

        def value(tree):
            out = torch.IntTensor(4)
            out.fill_(0)

            op = tree[0]
            if op == '+':
                out[0] = 1
            elif op == '*':
                out[1] = 1
            elif op == '-':
                out[2] = 1
            else:
                out[3] = op

            return out

        def children(tree):
            return tree[1]

        self.encoder = TreeEncoder(value, children) 
Example #2
Source File: mol_graph.py    From hgraph2graph with MIT License
def tensorize(mol_batch, vocab, avocab):
        mol_batch = [MolGraph(x) for x in mol_batch]
        tree_tensors, tree_batchG = MolGraph.tensorize_graph([x.mol_tree for x in mol_batch], vocab)
        graph_tensors, graph_batchG = MolGraph.tensorize_graph([x.mol_graph for x in mol_batch], avocab)
        tree_scope = tree_tensors[-1]
        graph_scope = graph_tensors[-1]

        max_cls_size = max( [len(c) for x in mol_batch for c in x.clusters] )
        cgraph = torch.zeros(len(tree_batchG) + 1, max_cls_size).int()
        for v,attr in tree_batchG.nodes(data=True):
            bid = attr['batch_id']
            offset = graph_scope[bid][0]
            tree_batchG.nodes[v]['inter_label'] = inter_label = [(x + offset, y) for x,y in attr['inter_label']]
            tree_batchG.nodes[v]['cluster'] = cls = [x + offset for x in attr['cluster']]
            tree_batchG.nodes[v]['assm_cands'] = [add(x, offset) for x in attr['assm_cands']]
            cgraph[v, :len(cls)] = torch.IntTensor(cls)

        all_orders = []
        for i,hmol in enumerate(mol_batch):
            offset = tree_scope[i][0]
            order = [(x + offset, y + offset, z) for x,y,z in hmol.order[:-1]] + [(hmol.order[-1][0] + offset, None, 0)]
            all_orders.append(order)

        tree_tensors = tree_tensors[:4] + (cgraph, tree_scope)
        return (tree_batchG, graph_batchG), (tree_tensors, graph_tensors), all_orders 
Example #3
Source File: dataloader.py    From LipNet-PyTorch with BSD 3-Clause "New" or "Revised" License
def ctc_collate(batch):
    '''
    Stack samples into CTC style inputs.
    Modified based on default_collate() in PyTorch.
    By Yuan-Hang Zhang.
    '''
    xs, ys, lens, indices = zip(*batch)
    max_len = max(lens)
    x = default_collate(xs)
    x = x.narrow(2, 0, max_len)  # narrow() returns a view; assign it back, or the trim is a no-op
    y = []
    for sub in ys: y += sub
    y = torch.IntTensor(y)
    lengths = torch.IntTensor(lens)
    y_lengths = torch.IntTensor([len(label) for label in ys])
    ids = default_collate(indices)

    return x, y, lengths, y_lengths, ids 
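A hedged, runnable illustration of this collate contract; the (frames, labels, length, index) sample layout and the (C, T, H, W) frame shape are assumptions for demonstration. The targets come out as one flat IntTensor plus per-sample lengths, the concatenated layout nn.CTCLoss accepts.

import torch

# Two hypothetical samples, both padded to T=80 frames, true lengths 75 and 60.
batch = [
    (torch.zeros(3, 80, 64, 128), [1, 2, 3], 75, 0),
    (torch.zeros(3, 80, 64, 128), [4, 5], 60, 1),
]
x, y, lengths, y_lengths, ids = ctc_collate(batch)
# x:         shape (2, 3, 75, 64, 128) -- time axis trimmed to the batch max
# y:         tensor([1, 2, 3, 4, 5], dtype=torch.int32) -- flat concatenation
# y_lengths: tensor([3, 2], dtype=torch.int32)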
Example #4
Source File: utils.py    From ICDAR-2019-SROIE with MIT License
def encode(self, text):
        """Support batch or single str.

        Args:
            text (str or list of str): texts to convert.

        Returns:
            torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
            torch.IntTensor [n]: length of each text.
        """
        if isinstance(text, str):
            text = [
                self.dict[char.lower() if self._ignore_case else char]
                for char in text
            ]
            length = [len(text)]
        elif isinstance(text, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
            length = [len(s) for s in text]
            text = ''.join(text)
            text, _ = self.encode(text)
        return (torch.IntTensor(text), torch.IntTensor(length)) 
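The contract is easiest to see concretely. Below is a runnable stand-in under assumed conventions (alphabet 'abc' with index 0 reserved for the CTC blank, mirroring typical CRNN converters; the real class builds self.dict from the alphabet passed to its constructor):

import torch

char_to_idx = {c: i + 1 for i, c in enumerate('abc')}  # 0 is reserved for the blank

def encode_texts(texts):
    lengths = [len(s) for s in texts]
    flat = [char_to_idx[ch] for s in texts for ch in s]
    return torch.IntTensor(flat), torch.IntTensor(lengths)

text, length = encode_texts(['ab', 'cab'])
# text   -> tensor([1, 2, 3, 1, 2], dtype=torch.int32)  (both strings, concatenated)
# length -> tensor([2, 3], dtype=torch.int32)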
Example #5
Source File: train.py    From ICDAR-2019-SROIE with MIT License
def trainBatch(net, criterion, optimizer):
    data = next(train_iter)  # Python 3 iterator protocol
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)

    preds = crnn(image)
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost 
Example #6
Source File: utils.py    From ConvLab with MIT License
def cast_type(var, dtype, use_gpu):
    if use_gpu:
        if dtype == INT:
            var = var.type(th.cuda.IntTensor)
        elif dtype == LONG:
            var = var.type(th.cuda.LongTensor)
        elif dtype == FLOAT:
            var = var.type(th.cuda.FloatTensor)
        else:
            raise ValueError('Unknown dtype')
    else:
        if dtype == INT:
            var = var.type(th.IntTensor)
        elif dtype == LONG:
            var = var.type(th.LongTensor)
        elif dtype == FLOAT:
            var = var.type(th.FloatTensor)
        else:
            raise ValueError('Unknown dtype')
    return var 
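On modern PyTorch the same dispatch collapses into a single .to() call. A hedged equivalent is below; the string keys stand in for ConvLab's INT/LONG/FLOAT constants, which are assumptions here.

import torch as th

def cast_type_modern(var, dtype, use_gpu):
    torch_dtype = {'int': th.int32, 'long': th.int64, 'float': th.float32}[dtype]
    return var.to(device='cuda' if use_gpu else 'cpu', dtype=torch_dtype)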
Example #7
Source File: main.py    From ICDAR-2019-SROIE with MIT License
def predict_this_box(image, model, alphabet):
    converter = utils.strLabelConverter(alphabet)
    transformer = dataset.resizeNormalize((200, 32))
    image = transformer(image)
    if torch.cuda.is_available():
        image = image.cuda()
    image = image.view(1, *image.size())
    image = Variable(image)

    model.eval()
    preds = model(image)

    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)

    preds_size = Variable(torch.IntTensor([preds.size(0)]))
    raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
    print('%-30s => %-30s' % (raw_pred, sim_pred))
    return sim_pred 
Example #8
Source File: darknet.py    From pytorch-0.4-yolov3 with MIT License
def __init__(self, cfgfile, use_cuda=True):
        super(Darknet, self).__init__()
        self.use_cuda = use_cuda
        self.blocks = parse_cfg(cfgfile)
        self.models = self.create_network(self.blocks)  # merge conv, bn, leaky
        self.loss_layers = self.getLossLayers()

        #self.width = int(self.blocks[0]['width'])
        #self.height = int(self.blocks[0]['height'])

        if len(self.loss_layers) > 0:
            last = len(self.loss_layers)-1
            self.anchors = self.loss_layers[last].anchors
            self.num_anchors = self.loss_layers[last].num_anchors
            self.anchor_step = self.loss_layers[last].anchor_step
            self.num_classes = self.loss_layers[last].num_classes

        # default format : major=0, minor=1
        self.header = torch.IntTensor([0,1,0,0])
        self.seen = 0 
Example #9
Source File: tokenizer.py    From crosentgec with GNU General Public License v3.0
def tokenize(line, dict, tokenize=tokenize_line, add_if_not_exist=True,
                 consumer=None, append_eos=True, reverse_order=False):
        words = tokenize(line)
        if reverse_order:
            words = list(reversed(words))
        nwords = len(words)
        ids = torch.IntTensor(nwords + 1 if append_eos else nwords)

        for i, word in enumerate(words):
            if add_if_not_exist:
                idx = dict.add_symbol(word)
            else:
                idx = dict.index(word)
            if consumer is not None:
                consumer(word, idx)
            ids[i] = idx
        if append_eos:
            ids[nwords] = dict.eos_index
        return ids 
Example #10
Source File: bleu.py    From crosentgec with GNU General Public License v3.0
def add(self, ref, pred):
        if not isinstance(ref, torch.IntTensor):
            raise TypeError('ref must be a torch.IntTensor (got {})'
                            .format(type(ref)))
        if not isinstance(pred, torch.IntTensor):
            raise TypeError('pred must be a torch.IntTensor (got {})'
                            .format(type(pred)))

        # don't match unknown words
        rref = ref.clone()
        assert not rref.lt(0).any()
        rref[rref.eq(self.unk)] = -999

        rref = rref.contiguous().view(-1)
        pred = pred.contiguous().view(-1)

        C.bleu_add(
            ctypes.byref(self.stat),
            ctypes.c_size_t(rref.size(0)),
            ctypes.c_void_p(rref.data_ptr()),
            ctypes.c_size_t(pred.size(0)),
            ctypes.c_void_p(pred.data_ptr()),
            ctypes.c_int(self.pad),
            ctypes.c_int(self.eos)) 
Example #11
Source File: roi_pool.py    From pytorch-FPN with MIT License
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        output = torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)
        argmax = torch.IntTensor(num_rois, num_channels, self.pooled_height, self.pooled_width).zero_()

        if not features.is_cuda:
            _features = features.permute(0, 2, 3, 1)
            roi_pooling.roi_pooling_forward(self.pooled_height, self.pooled_width, self.spatial_scale,
                                            _features, rois, output)
            # output = output.cuda()
        else:
            output = output.cuda()
            argmax = argmax.cuda()
            roi_pooling.roi_pooling_forward_cuda(self.pooled_height, self.pooled_width, self.spatial_scale,
                                                 features, rois, output, argmax)
            self.output = output
            self.argmax = argmax
            self.rois = rois
            self.feature_size = features.size()

        return output 
Example #12
Source File: w2l_decoder.py    From fairseq with MIT License
def decode(self, emissions):
        B, T, N = emissions.size()
        hypos = []
        if self.asg_transitions is None:
            transitions = torch.FloatTensor(N, N).zero_()
        else:
            transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
        viterbi_path = torch.IntTensor(B, T)
        workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
        CpuViterbiPath.compute(
            B,
            T,
            N,
            get_data_ptr_as_bytes(emissions),
            get_data_ptr_as_bytes(transitions),
            get_data_ptr_as_bytes(viterbi_path),
            get_data_ptr_as_bytes(workspace),
        )
        return [
            [{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
            for b in range(B)
        ] 
Example #13
Source File: roi_pool.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        output = torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)
        argmax = torch.IntTensor(num_rois, num_channels, self.pooled_height, self.pooled_width).zero_()

        if not features.is_cuda:
            _features = features.permute(0, 2, 3, 1)
            roi_pooling.roi_pooling_forward(self.pooled_height, self.pooled_width, self.spatial_scale,
                                            _features, rois, output)
            # output = output.cuda()
        else:
            output = output.cuda()
            argmax = argmax.cuda()
            roi_pooling.roi_pooling_forward_cuda(self.pooled_height, self.pooled_width, self.spatial_scale,
                                                 features, rois, output, argmax)
            self.output = output
            self.argmax = argmax
            self.rois = rois
            self.feature_size = features.size()

        return output 
Example #14
Source File: rod_align.py    From Grid-Anchor-based-Image-Cropping-Pytorch with MIT License
def forward(ctx, features, rois, aligned_width, aligned_height, spatial_scale):
        batch_size, num_channels, data_height, data_width = features.size()
        ctx.save_for_backward(rois,  
                              torch.IntTensor([int(batch_size),
                                               int(num_channels),
                                               int(data_height),
                                               int(data_width),
                                               int(aligned_width), 
                                               int(aligned_height)]),
                              torch.FloatTensor([float(spatial_scale)]))

        num_rois = rois.size(0)

        output = features.new(num_rois, 
                              num_channels, 
                              int(aligned_height), 
                              int(aligned_width)).zero_()
        
        rod_align_api.forward(int(aligned_height),
                              int(aligned_width),
                              float(spatial_scale), 
                              features,
                              rois, output)

        return output 
Example #15
Source File: roi_align.py    From Grid-Anchor-based-Image-Cropping-Pytorch with MIT License
def forward(ctx, features, rois, aligned_height, aligned_width, spatial_scale):
        batch_size, num_channels, data_height, data_width = features.size()
        ctx.save_for_backward(rois, 
                              torch.IntTensor([int(batch_size),
                                               int(num_channels),
                                               int(data_height),
                                               int(data_width),
                                               int(aligned_height), 
                                               int(aligned_width)]),
                              torch.FloatTensor([float(spatial_scale)]))

        num_rois = rois.size(0)

        output = features.new(num_rois, 
                              num_channels, 
                              int(aligned_height), 
                              int(aligned_width)).zero_()
        
        roi_align_api.forward(int(aligned_height),
                              int(aligned_width),
                              float(spatial_scale), 
                              features,
                              rois,
                              output)
        return output 
Example #16
Source File: psroi_pool.py    From pytorch-detect-to-track with MIT License
def forward(ctx, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        output = torch.zeros(num_rois, ctx.output_dim, ctx.pooled_height, ctx.pooled_width)
        mappingchannel = torch.IntTensor(num_rois, ctx.output_dim, ctx.pooled_height, ctx.pooled_width).zero_()
        output = output.cuda()
        mappingchannel = mappingchannel.cuda()
        psroi_pooling.psroi_pooling_forward_cuda(ctx.pooled_height, ctx.pooled_width, ctx.spatial_scale,
                                                 ctx.group_size, ctx.output_dim,
                                                 features, rois, output, mappingchannel)
        ctx.output = output
        ctx.mappingchannel = mappingchannel
        ctx.rois = rois
        ctx.feature_size = features.size()

        return output 
Example #17
Source File: mnist.py    From Text-Recognition with GNU Lesser General Public License v2.1
def __getitem__(self, index):

		size = self.batch_size
		images, targets = self.batch(size)
		images = [torch.FloatTensor(np.array(Image.fromarray(img).convert('RGB').resize(size=(int(32*img.shape[1]/img.shape[0]), 32))).transpose(2, 0, 1)) for img in images]

		seq_len = [torch.IntTensor([target_i.shape[0]]) for target_i in targets]

		seq = []
		for i in range(len(targets)):

			seq.append(targets[i].int())

		images = [image.unsqueeze(0) for image in images]

		sample = {"img": images, "seq": seq, "seq_len": seq_len, "aug": True}


		return sample 
Example #18
Source File: utils.py    From Text-Recognition with GNU Lesser General Public License v2.1
def encode(self, text):
		"""Support batch or single str.

		Args:
			text (str or list of str): texts to convert.

		Returns:
			torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
			torch.IntTensor [n]: length of each text.
		"""
		if isinstance(text, str):
			text = [
				self.dict[char.lower() if self._ignore_case else char]
				for char in text
			]
			length = [len(text)]
		elif isinstance(text, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
			length = [len(s) for s in text]
			text = ''.join(text)
			text, _ = self.encode(text)
		return (torch.IntTensor(text), torch.IntTensor(length)) 
Example #19
Source File: glow.py    From tn2-wg with BSD 3-Clause "New" or "Revised" License
def forward(self, forward_input):
        audio, spect = forward_input
        audio = self.start(audio)

        for i in range(self.n_layers):
            acts = fused_add_tanh_sigmoid_multiply(
                self.in_layers[i](audio),
                self.cond_layers[i](spect),
                torch.IntTensor([self.n_channels]))

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                audio = res_skip_acts[:,:self.n_channels,:] + audio
                skip_acts = res_skip_acts[:,self.n_channels:,:]
            else:
                skip_acts = res_skip_acts

            if i == 0:
                output = skip_acts
            else:
                output = skip_acts + output
        return self.end(output) 
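fused_add_tanh_sigmoid_multiply is defined elsewhere in the file; the sketch below reconstructs it from the standard WaveNet gating idiom, so treat the details as assumptions rather than this repo's exact code. Passing n_channels as a one-element IntTensor instead of a plain int is what keeps the helper compatible with torch.jit scripting.

import torch

def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    # Sum the two conditioning streams, then gate: the first n channels pass
    # through tanh, the remaining channels through sigmoid.
    n = int(n_channels[0])
    in_act = input_a + input_b
    return torch.tanh(in_act[:, :n, :]) * torch.sigmoid(in_act[:, n:, :])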
Example #20
Source File: test_attention.py    From ITDD with MIT License
def test_masked_global_attention(self):

        source_lengths = torch.IntTensor([7, 3, 5, 2])
        # illegal_weights_mask = torch.ByteTensor([
        #     [0, 0, 0, 0, 0, 0, 0],
        #     [0, 0, 0, 1, 1, 1, 1],
        #     [0, 0, 0, 0, 0, 1, 1],
        #     [0, 0, 1, 1, 1, 1, 1]])

        batch_size = source_lengths.size(0)
        dim = 20

        memory_bank = Variable(torch.randn(batch_size,
                                           source_lengths.max(), dim))
        hidden = Variable(torch.randn(batch_size, dim))

        attn = onmt.modules.GlobalAttention(dim)

        _, alignments = attn(hidden, memory_bank,
                             memory_lengths=source_lengths)
        # TODO: fix for pytorch 0.3
        # illegal_weights = alignments.masked_select(illegal_weights_mask)

        # self.assertEqual(0.0, illegal_weights.data.sum()) 
Example #21
Source File: bleu.py    From fairseq with MIT License
def add(self, ref, pred):
        if not isinstance(ref, torch.IntTensor):
            raise TypeError('ref must be a torch.IntTensor (got {})'
                            .format(type(ref)))
        if not isinstance(pred, torch.IntTensor):
            raise TypeError('pred must be a torch.IntTensor (got {})'
                            .format(type(pred)))

        # don't match unknown words
        rref = ref.clone()
        assert not rref.lt(0).any()
        rref[rref.eq(self.unk)] = -999

        rref = rref.contiguous().view(-1)
        pred = pred.contiguous().view(-1)

        C.bleu_add(
            ctypes.byref(self.stat),
            ctypes.c_size_t(rref.size(0)),
            ctypes.c_void_p(rref.data_ptr()),
            ctypes.c_size_t(pred.size(0)),
            ctypes.c_void_p(pred.data_ptr()),
            ctypes.c_int(self.pad),
            ctypes.c_int(self.eos)) 
Example #22
Source File: utils.py    From fairseq with MIT License
def parse_alignment(line):
    """
    Parses a single line from the alignment file.

    Args:
        line (str): String containing the alignment of the format:
            <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ...
            <src_idx_m>-<tgt_idx_m>. All indices are 0-indexed.

    Returns:
        torch.IntTensor: packed alignments of shape (2 * m).
    """
    alignments = line.strip().split()
    parsed_alignment = torch.IntTensor(2 * len(alignments))
    for idx, alignment in enumerate(alignments):
        src_idx, tgt_idx = alignment.split("-")
        parsed_alignment[2 * idx] = int(src_idx)
        parsed_alignment[2 * idx + 1] = int(tgt_idx)
    return parsed_alignment 
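For example, applied to a line in the docstring's format:

alignment = parse_alignment('0-0 1-2 2-1')
# -> tensor([0, 0, 1, 2, 2, 1], dtype=torch.int32)
# Pairs are packed flat: source indices at even positions, target indices at odd ones.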
Example #23
Source File: pytorch_util.py    From rlgraph with Apache License 2.0
def pytorch_one_hot(index_tensor, depth=0):
    """
    One-hot utility function for PyTorch.

    Args:
        index_tensor (torch.Tensor): The input to be one-hot.
        depth (int): The max. number to be one-hot encoded (size of last rank).

    Returns:
        torch.Tensor: The one-hot encoded equivalent of the input array.
    """
    if get_backend() == "pytorch":
        # Do converts.
        if isinstance(index_tensor, torch.FloatTensor):
            index_tensor = index_tensor.long()
        if isinstance(index_tensor, torch.IntTensor):
            index_tensor = index_tensor.long()

        out = torch.zeros(index_tensor.size() + torch.Size([depth]))
        dim = len(index_tensor.size())
        index = index_tensor.unsqueeze(-1)
        return out.scatter_(dim, index, 1) 
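A quick usage check, plus the built-in that newer code can reach for (torch.nn.functional.one_hot, which returns int64 rather than the float output above):

import torch
import torch.nn.functional as F

indices = torch.IntTensor([0, 2, 1])
out = pytorch_one_hot(indices, depth=3)
# -> tensor([[1., 0., 0.],
#            [0., 0., 1.],
#            [0., 1., 0.]])
builtin = F.one_hot(indices.long(), num_classes=3)  # same pattern, int64 dtype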
Example #24
Source File: nnutils.py    From hgraph2graph with MIT License
def create_pad_tensor(alist):
    max_len = max([len(a) for a in alist]) + 1
    for a in alist:
        pad_len = max_len - len(a)
        a.extend([0] * pad_len)
    return torch.IntTensor(alist) 
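A worked example of the padding rule; note that the rows are extended in place and max_len is one more than the longest row:

padded = create_pad_tensor([[1, 2], [3]])
# max_len = max(2, 1) + 1 = 3, so each row is zero-padded to length 3:
# tensor([[1, 2, 0],
#         [3, 0, 0]], dtype=torch.int32)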
Example #25
Source File: weights_search.py    From translate with BSD 3-Clause "New" or "Revised" License
def evaluate_weights(scores_info, feature_weights, length_penalty):
    scorer = bleu.Scorer(
        vocab_constants.PAD_ID, vocab_constants.EOS_ID, vocab_constants.UNK_ID
    )

    for example in scores_info:
        weighted_scores = (example["scores"] * feature_weights).sum(axis=1)
        weighted_scores /= (example["tgt_len"] ** length_penalty) + 1e-12
        top_hypo_ind = np.argmax(weighted_scores)
        top_hypo = example["hypos"][top_hypo_ind]
        ref = example["target_tokens"]
        scorer.add(torch.IntTensor(ref), torch.IntTensor(top_hypo))

    return scorer.score() 
Example #26
Source File: checkpoint.py    From translate with BSD 3-Clause "New" or "Revised" License
def is_integer_tensor(tensor: torch.Tensor) -> bool:
    return (
        isinstance(tensor, torch.ByteTensor)
        or isinstance(tensor, torch.CharTensor)
        or isinstance(tensor, torch.ShortTensor)
        or isinstance(tensor, torch.IntTensor)
        or isinstance(tensor, torch.LongTensor)
    ) 
Example #27
Source File: mol_graph.py    From hgraph2graph with MIT License
def tensorize_graph(graph_batch, vocab):
        fnode,fmess = [None],[(0,0,0,0)] 
        agraph,bgraph = [[]], [[]] 
        scope = []
        edge_dict = {}
        all_G = []

        for bid,G in enumerate(graph_batch):
            offset = len(fnode)
            scope.append( (offset, len(G)) )
            G = nx.convert_node_labels_to_integers(G, first_label=offset)
            all_G.append(G)
            fnode.extend( [None for v in G.nodes] )

            for v, attr in G.nodes(data='label'):
                G.nodes[v]['batch_id'] = bid
                fnode[v] = vocab[attr]
                agraph.append([])

            for u, v, attr in G.edges(data='label'):
                if type(attr) is tuple:
                    fmess.append( (u, v, attr[0], attr[1]) )
                else:
                    fmess.append( (u, v, attr, 0) )
                edge_dict[(u, v)] = eid = len(edge_dict) + 1
                G[u][v]['mess_idx'] = eid
                agraph[v].append(eid)
                bgraph.append([])

            for u, v in G.edges:
                eid = edge_dict[(u, v)]
                for w in G.predecessors(u):
                    if w == v: continue
                    bgraph[eid].append( edge_dict[(w, u)] )

        fnode[0] = fnode[1]
        fnode = torch.IntTensor(fnode)
        fmess = torch.IntTensor(fmess)
        agraph = create_pad_tensor(agraph)
        bgraph = create_pad_tensor(bgraph)
        return (fnode, fmess, agraph, bgraph, scope), nx.union_all(all_G) 
Example #28
Source File: TestOneImageR.py    From Text-Recognition with GNU Lesser General Public License v2.1
def test_one_image_with_cropped(self, image, path=None, save=True):

		self.profiler(self.start_testing)

		with torch.no_grad():

			resized_img = self.test_data_loader.resize([np.array(image)], fixed='not_fixed')[0][0]

			if self.cuda:
				resized_img = resized_img.cuda()

			out = self.profiler(self.model, resized_img)
			# print(out.shape)
			_, preds = out.max(2)
			preds = preds.transpose(1, 0).contiguous().view(-1)
			preds_size = torch.IntTensor([out.size(0)]).int()
			predicted_final_label = self.converter.decode(preds.data, preds_size.data, raw=False)
			# print('Predicted Label:',predicted_final_label)
			if predicted_final_label == '':
				predicted_final_label = 'NONE'
			# return out
			if save:
				path_to_save = path + '/' + predicted_final_label + '.png'
				plt.imsave(path_to_save, resized_img.cpu().data.numpy()[0][0]/255)
			else:
				return predicted_final_label 
Example #29
Source File: data.py    From retinanet-examples with BSD 3-Clause "New" or "Revised" License
def collate_fn(self, batch):
        'Create batch from multiple samples'

        if self.training:
            data, targets = zip(*batch)
            max_det = max([t.size()[0] for t in targets])
            targets = [torch.cat([t, torch.ones([max_det - t.size()[0], 6]) * -1]) for t in targets]
            targets = torch.stack(targets, 0)
        else:
            data, indices, ratios = zip(*batch)

        # Pad data to match max batch dimensions
        sizes = [d.size()[-2:] for d in data]
        w, h = (max(dim) for dim in zip(*sizes))

        data_stack = []
        for datum in data:
            pw, ph = w - datum.size()[-2], h - datum.size()[-1]
            data_stack.append(
                F.pad(datum, (0, ph, 0, pw)) if max(ph, pw) > 0 else datum)

        data = torch.stack(data_stack)

        if self.training:
            return data, targets

        ratios = torch.FloatTensor(ratios).view(-1, 1, 1)
        return data, torch.IntTensor(indices), ratios
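The padding arithmetic above is easy to sanity-check in isolation: F.pad takes (left, right) pairs starting from the last dimension, so (0, ph, 0, pw) grows the last axis by ph and the second-to-last by pw. A self-contained check with made-up sizes:

import torch
import torch.nn.functional as F

datum = torch.zeros(3, 5, 7)           # (C, 5, 7)
pw, ph = 8 - 5, 10 - 7                 # target spatial size (8, 10)
padded = F.pad(datum, (0, ph, 0, pw))  # pad pairs run from the last dim backwards
# padded.size() -> torch.Size([3, 8, 10])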