Python torch.is_tensor() Examples

The following are 30 code examples of torch.is_tensor(), collected from open-source projects. The source file, project, and license for each example are noted above it. You may also want to check out all available functions and classes of the torch module.
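Before the project examples, a minimal sketch of what torch.is_tensor() does (this snippet is illustrative, not taken from any project below):

import torch
import numpy as np

x = torch.zeros(3)
print(torch.is_tensor(x))            # True
print(torch.is_tensor(np.zeros(3)))  # False: a NumPy ndarray is not a Tensor

# torch.is_tensor(obj) is equivalent to isinstance(obj, torch.Tensor);
# the isinstance form is what the PyTorch docs recommend for type checks.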
Example #1
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def class_balanced_weight(beta, samples_per_class):
    assert 0 <= beta < 1, 'Wrong range of beta {}'.format(beta)
    if not isinstance(samples_per_class, np.ndarray):
        if isinstance(samples_per_class, (list, tuple)):
            samples_per_class = np.array(samples_per_class)
        elif torch.is_tensor(samples_per_class):
            samples_per_class = samples_per_class.numpy()
        else:
            raise NotImplementedError(
                'Type of samples_per_class should be {}, {} or {} but got {}'.format(
                    (list, tuple), np.ndarray, torch.Tensor, type(samples_per_class)))
    assert isinstance(samples_per_class, np.ndarray) \
        and isinstance(beta, numbers.Number)

    balanced_matrix = (1 - beta) / (1 - np.power(beta, samples_per_class))
    return torch.Tensor(balanced_matrix) 
Example #2
Source File: show_result.py    From DenseMatchingBenchmark with MIT License
def get_gray_and_color_flow(self, Flow, max_rad=None):
        assert isinstance(Flow, (np.ndarray, torch.Tensor))

        if torch.is_tensor(Flow):
            Flow = Flow.clone().detach().cpu()

        if len(Flow.shape) == 4:
            Flow = Flow[0, :, :, :]

        # [2, H, W] -> [H, W, 2]
        Flow = chw_to_hwc(Flow)
        # [H, W, 2]
        grayFlow = Flow.copy()
        # [H, W, 3]
        colorFlow = flow_to_color(Flow.copy(), max_rad=max_rad)

        return grayFlow, colorFlow 
Example #3
Source File: mixed_lipschitz.py    From residual-flows with MIT License
def normalize_u(u, codomain, out=None):
    if not torch.is_tensor(codomain) and codomain == 2:
        u = F.normalize(u, p=2, dim=0, out=out)
    elif codomain == float('inf'):
        u = projmax_(u)
    else:
        uabs = torch.abs(u)
        uph = u / uabs
        uph[torch.isnan(uph)] = 1
        uabs = uabs / torch.max(uabs)
        uabs = uabs**(codomain - 1)
        if codomain == 1:
            u = uph * uabs / vector_norm(uabs, float('inf'))
        else:
            u = uph * uabs / vector_norm(uabs, codomain / (codomain - 1))
    return u 
Example #4
Source File: dataset_factory.py    From ssds.pytorch with MIT License
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        for tup in sample:
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, np.ndarray):
                annos = torch.from_numpy(tup).float()
                targets.append(annos)

    return (torch.stack(imgs, 0), targets) 
Example #5
Source File: fixed_points.py    From pytorch_geometric with MIT License
def __call__(self, data):
        num_nodes = data.num_nodes

        if self.replace:
            choice = np.random.choice(num_nodes, self.num, replace=True)
            choice = torch.from_numpy(choice).to(torch.long)
        elif not self.allow_duplicates:
            choice = torch.randperm(num_nodes)[:self.num]
        else:
            choice = torch.cat([
                torch.randperm(num_nodes)
                for _ in range(math.ceil(self.num / num_nodes))
            ], dim=0)[:self.num]

        for key, item in data:
            if bool(re.search('edge', key)):
                continue
            if torch.is_tensor(item) and item.size(0) == num_nodes:
                data[key] = item[choice]

        return data 
Example #6
Source File: utils.py    From OpenChem with MIT License
def move_to_cuda(sample):
    # copy-pasted from
    # https://github.com/pytorch/fairseq/blob/master/fairseq/utils.py
    if len(sample) == 0:
        return {}

    def _move_to_cuda(maybe_tensor):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.cuda()
        elif isinstance(maybe_tensor, dict):
            return {
                key: _move_to_cuda(value)
                for key, value in maybe_tensor.items()
            }
        elif isinstance(maybe_tensor, list):
            return [_move_to_cuda(x) for x in maybe_tensor]
        else:
            return maybe_tensor

    return _move_to_cuda(sample) 
Example #7
Source File: state.py    From Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch with MIT License
def __merge_states(self, state_list, type_state='hidden'):
        if state_list is None:
            return None
        if isinstance(state_list[0], State):
            return State().from_list(state_list)
        if isinstance(state_list[0], tuple):
            return tuple([self.__merge_states(s, type_state) for s in zip(*state_list)])
        else:
            if torch.is_tensor(state_list[0]):
                if type_state == 'hidden':
                    batch_dim = 0 if state_list[0].dim() < 3 else 1
                else:
                    batch_dim = 0 if self.batch_first else 1
                return torch.cat(state_list, batch_dim)
            else:
                assert state_list[1:] == state_list[:-1]  # all items are equal
                return state_list[0] 
Example #8
Source File: in_memory_dataset.py    From pytorch_geometric with MIT License
def get(self, idx):
        data = self.data.__class__()

        if hasattr(self.data, '__num_nodes__'):
            data.num_nodes = self.data.__num_nodes__[idx]

        for key in self.data.keys:
            item, slices = self.data[key], self.slices[key]
            start, end = slices[idx].item(), slices[idx + 1].item()
            if torch.is_tensor(item):
                s = list(repeat(slice(None), item.dim()))
                s[self.data.__cat_dim__(key, item)] = slice(start, end)
            elif start + 1 == end:
                s = slices[start]
            else:
                s = slice(start, end)
            data[key] = item[s]
        return data 
Example #9
Source File: utils.py    From ScenarioMeta with MIT License
def forward(self, nodes):
        if torch.is_tensor(nodes):
            if self.neighbor_dict is not None:
                neighbors = [
                    random.sample(self.neighbor_dict[idx.item()], self.max_degree)
                    if len(self.neighbor_dict[idx.item()]) > self.max_degree
                    else self.neighbor_dict[idx.item()]
                    for idx in nodes
                ]
        else:
            if self.neighbor_dict is not None:
                neighbors = [
                    random.sample(self.neighbor_dict[idx], self.max_degree)
                    if len(self.neighbor_dict[idx]) > self.max_degree
                    else self.neighbor_dict[idx]
                    for idx in nodes
                ]
            nodes = torch.tensor(nodes, dtype=torch.long, device=self.flag.device)
        if self.neighbor_dict is not None:
            degrees = torch.tensor(list(map(len, neighbors)), dtype=torch.long, device=self.flag.device)
            neighbors = list2tensor(neighbors, self.padding_idx, device=self.flag.device)
            return nodes, neighbors, degrees
        else:
            return (nodes,) 
Example #10
Source File: wider_voc.py    From hand-detection.PyTorch with MIT License
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        for tup in sample:
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, np.ndarray):
                annos = torch.from_numpy(tup).float()
                targets.append(annos)

    return (torch.stack(imgs, 0), targets) 
Example #11
Source File: data.py    From pytorch_geometric with MIT License
def __apply__(self, item, func):
        if torch.is_tensor(item):
            return func(item)
        elif isinstance(item, SparseTensor):
            # Not all apply methods are supported for `SparseTensor`, e.g.,
            # `contiguous()`. We can get around it by capturing the exception.
            try:
                return func(item)
            except AttributeError:
                return item
        elif isinstance(item, (tuple, list)):
            return [self.__apply__(v, func) for v in item]
        elif isinstance(item, dict):
            return {k: self.__apply__(v, func) for k, v in item.items()}
        else:
            return item 
Example #12
Source File: scatter_gather.py    From CornerNet-Lite-Pytorch with BSD 3-Clause "New" or "Revised" License
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    r"""
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, tuple):
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict):
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    return scatter_map(inputs) 
Example #13
Source File: policy_util.py    From ConvLab with MIT License
def calc_pdparam(state, algorithm, body):
    '''
    Prepare the state and run algorithm.calc_pdparam to get pdparam for action_pd
    @param tensor:state For pdparam = net(state)
    @param algorithm The algorithm containing self.net
    @param body Body which links algorithm to the env which the action is for
    @returns tensor:pdparam
    @example

    pdparam = calc_pdparam(state, algorithm, body)
    action_pd = ActionPD(logits=pdparam)  # e.g. ActionPD is Categorical
    action = action_pd.sample()
    '''
    if not torch.is_tensor(state):  # only guard/cast when state is not already a tensor
        state = guard_tensor(state, body)
        state = state.to(algorithm.net.device)
    pdparam = algorithm.calc_pdparam(state)
    return pdparam 
Example #14
Source File: dictionary.py    From crosentgec with GNU General Public License v3.0
def string(self, tensor, bpe_symbol=None, escape_unk=False):
        """Helper for converting a tensor of token indices to a string.

        Can optionally remove BPE symbols or escape <unk> words.
        """
        if torch.is_tensor(tensor) and tensor.dim() == 2:
            return '\n'.join(self.string(t) for t in tensor)

        def token_string(i):
            if i == self.unk():
                return self.unk_string(escape_unk)
            else:
                return self[i]

        sent = ' '.join(token_string(i) for i in tensor if i != self.eos())
        if bpe_symbol is not None:
            sent = (sent + ' ').replace(bpe_symbol, '').rstrip()
        return sent 
Example #15
Source File: utils.py    From crosentgec with GNU General Public License v3.0
def move_to_cuda(sample):
    if len(sample) == 0:
        return {}

    def _move_to_cuda(maybe_tensor):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.cuda()
        elif isinstance(maybe_tensor, dict):
            return {
                key: _move_to_cuda(value)
                for key, value in maybe_tensor.items()
            }
        elif isinstance(maybe_tensor, list):
            return [_move_to_cuda(x) for x in maybe_tensor]
        else:
            return maybe_tensor

    return _move_to_cuda(sample) 
Example #16
Source File: utils.py    From VTuber_Unity with MIT License
def flip(tensor, is_label=False):
    """Flip an image or a set of heatmaps left-right

    Arguments:
        tensor {numpy.array or torch.tensor} -- [the input image or heatmaps]

    Keyword Arguments:
        is_label {bool} -- [denotes whether the input is an image or a set of heatmaps] (default: {False})
    """
    if not torch.is_tensor(tensor):
        tensor = torch.from_numpy(tensor)

    if is_label:
        tensor = shuffle_lr(tensor).flip(tensor.ndimension() - 1)
    else:
        tensor = tensor.flip(tensor.ndimension() - 1)

    return tensor

# From pyzolib/paths.py (https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py) 
Example #17
Source File: th.py    From EMANet with GNU General Public License v3.0
def as_numpy(obj):
    if isinstance(obj, collections.Sequence):
        return [as_numpy(v) for v in obj]
    elif isinstance(obj, collections.Mapping):
        return {k: as_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, Variable):
        return obj.data.cpu().numpy()
    elif torch.is_tensor(obj):
        return obj.cpu().numpy()
    else:
        return np.array(obj) 
Example #18
Source File: eval.py    From DenseMatchingBenchmark with MIT License
def do_evaluation(est_disp, gt_disp, lb, ub):
    """
    Do pixel error evaluation. (See KITTI evaluation protocols for details.)
    Args:
        est_disp, (Tensor): estimated disparity map, in [BatchSize, Channel, Height, Width] or
            [BatchSize, Height, Width] or [Height, Width] layout
        gt_disp, (Tensor): ground truth disparity map, in [BatchSize, Channel, Height, Width] or
            [BatchSize, Height, Width] or [Height, Width] layout
        lb, (scalar): the lower bound of disparity you want to mask out
        ub, (scalar): the upper bound of disparity you want to mask out

    Returns:
        error_dict (dict): the 1px, 2px, 3px, 5px error rates (in percent,
            range [0, 100]) and the average end-point error (EPE)
    """
    error_dict = {}
    if est_disp is None:
        warnings.warn('Estimated disparity map is None')
        return error_dict
    if gt_disp is None:
        warnings.warn('Reference ground truth disparity map is None')
        return error_dict

    if torch.is_tensor(est_disp):
        est_disp = est_disp.clone().cpu()

    if torch.is_tensor(gt_disp):
        gt_disp = gt_disp.clone().cpu()

    error_dict = calc_error(est_disp, gt_disp, lb=lb, ub=ub)

    return error_dict 
Example #19
Source File: eval.py    From DenseMatchingBenchmark with MIT License
def do_evaluation(est_flow, gt_flow, sparse=False):
    """
    Do pixel error evaluation. (See KITTI evaluation protocols for details.)
    Args:
        est_flow, (Tensor): estimated flow map, in [BatchSize, 2, Height, Width] or
            [2, Height, Width] layout
        gt_flow, (Tensor): ground truth flow map, in [BatchSize, 2, Height, Width] or
            [2, Height, Width] layout
        sparse, (bool): whether the given flow is sparse, default False

    Returns:
        error_dict (dict): the 1px, 2px, 3px, 5px error rates (in percent,
            range [0, 100]) and the average end-point error (EPE)
    """
    error_dict = {}
    if est_flow is None:
        warnings.warn('Estimated flow map is None')
        return error_dict
    if gt_flow is None:
        warnings.warn('Reference ground truth flow map is None')
        return error_dict

    if torch.is_tensor(est_flow):
        est_flow = est_flow.clone().cpu()

    if torch.is_tensor(gt_flow):
        gt_flow = gt_flow.clone().cpu()

    error_dict = calc_error(est_flow, gt_flow, sparse=sparse)

    return error_dict 
Example #20
Source File: utils.py    From crosentgec with GNU General Public License v3.0
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
    if isinstance(state_dict, dict):
        cpu_dict = OrderedDict()
        for k, v in state_dict.items():
            cpu_dict[k] = convert_state_dict_type(v)
        return cpu_dict
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict 
Example #21
Source File: mixed_lipschitz.py    From residual-flows with MIT License
def compute_domain_codomain(self):
        if torch.is_tensor(self.domain):
            domain = asym_squash(self.domain)
            codomain = asym_squash(self.codomain)
        else:
            domain, codomain = self.domain, self.codomain
        return domain, codomain 
Example #22
Source File: torchloader.py    From mxbox with BSD 3-Clause "New" or "Revised" License
def default_collate(batch):
    "Puts each data field into a tensor with outer dimension batch size"
    if torch.is_tensor(batch[0]):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif type(batch[0]).__module__ == 'numpy':
        elem = batch[0]
        if type(elem).__name__ == 'ndarray':
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
                     .format(type(batch[0])))) 
Example #23
Source File: mixed_lipschitz.py    From residual-flows with MIT License
def extra_repr(self):
        domain, codomain = self.compute_domain_codomain()
        return (
            'in_features={}, out_features={}, bias={}'
            ', coeff={}, domain={:.2f}, codomain={:.2f}, n_iters={}, atol={}, rtol={}, learnable_ord={}'.format(
                self.in_features, self.out_features, self.bias is not None, self.coeff, domain, codomain,
                self.n_iterations, self.atol, self.rtol, torch.is_tensor(self.domain)
            )
        ) 
Example #24
Source File: mixed_lipschitz.py    From residual-flows with MIT License
def compute_domain_codomain(self):
        if torch.is_tensor(self.domain):
            domain = asym_squash(self.domain)
            codomain = asym_squash(self.codomain)
        else:
            domain, codomain = self.domain, self.codomain
        return domain, codomain 
Example #25
Source File: _utils.py    From UnsupervisedGeometryAwareRepresentationLearning with GNU General Public License v3.0
def convert_tensor(input_, device=None):
    if torch.is_tensor(input_):
        if device:
            input_ = input_.to(device=device)
        return input_
    elif isinstance(input_, string_classes):
        return input_
    elif isinstance(input_, collections.Mapping):
        return {k: convert_tensor(sample, device=device) for k, sample in input_.items()}
    elif isinstance(input_, collections.Sequence):
        return [convert_tensor(sample, device=device) for sample in input_]
    else:
        raise TypeError(("input must contain tensors, dicts or lists; found {}"
                         .format(type(input_)))) 
Example #26
Source File: data_parallel.py    From EMANet with GNU General Public License v3.0
def async_copy_to(obj, dev, main_stream=None):
    if torch.is_tensor(obj):
        obj = Variable(obj)
    if isinstance(obj, Variable):
        v = obj.cuda(dev, non_blocking=True)  # 'async' is a reserved word in Python 3.7+
        if main_stream is not None:
            v.data.record_stream(main_stream)
        return v
    elif isinstance(obj, collections.Mapping):
        return {k: async_copy_to(o, dev, main_stream) for k, o in obj.items()}
    elif isinstance(obj, collections.Sequence):
        return [async_copy_to(o, dev, main_stream) for o in obj]
    else:
        return obj 
Example #27
Source File: th.py    From EMANet with GNU General Public License v3.0
def mark_volatile(obj):
    if torch.is_tensor(obj):
        obj = Variable(obj)
    if isinstance(obj, Variable):
        obj.no_grad = True
        return obj
    elif isinstance(obj, collections.Mapping):
        return {k: mark_volatile(o) for k, o in obj.items()}
    elif isinstance(obj, collections.Sequence):
        return [mark_volatile(o) for o in obj]
    else:
        return obj 
Example #28
Source File: train_toy.py    From residual-flows with MIT License
def get_ords(model):
    ords = []
    for m in model.modules():
        if isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear):
            domain, codomain = m.compute_domain_codomain()
            if torch.is_tensor(domain):
                domain = domain.item()
            if torch.is_tensor(codomain):
                codomain = codomain.item()
            ords.append(domain)
            ords.append(codomain)
    return ords 
Example #29
Source File: dataloader.py    From EMANet with GNU General Public License v3.0
def default_collate(batch):
    "Puts each data field into a tensor with outer dimension batch size"

    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if torch.is_tensor(batch[0]):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))

            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError((error_msg.format(type(batch[0])))) 
Example #30
Source File: dataloader.py    From EMANet with GNU General Public License v3.0
def pin_memory_batch(batch):
    if torch.is_tensor(batch):
        return batch.pin_memory()
    elif isinstance(batch, string_classes):
        return batch
    elif isinstance(batch, collections.Mapping):
        return {k: pin_memory_batch(sample) for k, sample in batch.items()}
    elif isinstance(batch, collections.Sequence):
        return [pin_memory_batch(sample) for sample in batch]
    else:
        return batch