Python torch.tensors() Examples

The following are 21 code examples of torch.tensors(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
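As a quick orientation before the examples, here is a minimal sketch of creating tensors with torch.tensor(); the values below are purely illustrative.

import torch

# Build tensors from Python lists; dtype is inferred unless given explicitly.
a = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
b = torch.tensor([1, 2, 3], dtype=torch.long)

print(a.shape)   # torch.Size([2, 2])
print(b.dtype)   # torch.int64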
Example #1
Source File: utils.py    From xfer with Apache License 2.0
def unflatten_like(vector, likeTensorList):
    """
    Takes a flat torch.tensor and unflattens it to a list of torch.tensors
        shaped like likeTensorList
    Arguments:
    vector (torch.tensor): flat one dimensional tensor
    likeTensorList (list or iterable): list of tensors with the same total number of
        elements as vector
    """
    outList = []
    i = 0
    for tensor in likeTensorList:
        n = tensor.numel()
        outList.append(vector[i : i + n].view(tensor.shape))
        i += n
    return outList 
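A usage sketch of unflatten_like: flatten a list of tensors into one vector, then recover the original shapes. The flatten helper below is illustrative and not part of the xfer source.

import torch

def flatten(tensor_list):
    # Illustrative counterpart: concatenate flattened views into one 1-D tensor.
    return torch.cat([t.reshape(-1) for t in tensor_list])

params = [torch.randn(3, 4), torch.randn(5)]
vec = flatten(params)                     # shape: (17,)
restored = unflatten_like(vec, params)    # tensors shaped (3, 4) and (5,)
assert all(r.shape == p.shape for r, p in zip(restored, params))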
Example #2
Source File: dataset.py    From transfer-nlp with MIT License
def load_data_lm():
    dataset_file = cached_path("https://s3.amazonaws.com/datasets.huggingface.co/wikitext-103/"
                               "wikitext-103-train-tokenized-bert.bin")
    datasets = torch.load(dataset_file)

    # Convert our encoded dataset to torch.tensors and reshape in blocks of the transformer's input length
    for split_name in ['train', 'valid']:
        tensor = torch.tensor(datasets[split_name], dtype=torch.long)
        num_sequences = (tensor.size(0) // 256) * 256
        datasets[split_name] = tensor.narrow(0, 0, num_sequences).view(-1, 256)

    n = len(datasets['valid']) // 2
    datasets['test'] = datasets['valid'][n:]
    datasets['valid'] = datasets['valid'][:n]
    datasets['train'] = datasets['train'][:1000]
    return datasets 
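The narrow/view step above packs a long 1-D token sequence into fixed-length rows, dropping any trailing remainder; a small sketch of the same idea with a toy block length of 4 instead of 256:

import torch

tokens = torch.arange(10)                        # pretend token ids
block = 4
num_sequences = (tokens.size(0) // block) * block
blocks = tokens.narrow(0, 0, num_sequences).view(-1, block)
print(blocks)   # tensor([[0, 1, 2, 3], [4, 5, 6, 7]]) -- the last 2 tokens are dropped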
Example #3
Source File: data_convertors.py    From DualResidualNetworks with MIT License
def __getitem__(self, index):
        im_name         = self.imlist[index]
        im_input, label = self.sample_loader(im_name)


        # Resize a sample, or not.
        if self.resize_to is not None:
            im_input = cv2.resize(im_input, self.resize_to)
            label    = cv2.resize(label,    self.resize_to)

        
        # Transform: output torch.tensors of [0,1] and (C,H,W).
        # Note: for testing on DDN_Data and RESIDE, the output is in [0,1] and (V,C,H,W).
        #       V is the number of distortion types in a dataset (e.g., V == 14 for DDN_Data)
        if self.transform is not None:
            im_input, label = self.Transformer(im_input, label)
            
        return im_input, label, im_name

            
    # Read an image name list.
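The Transformer callable above is project-specific; as a rough sketch (names are illustrative), the usual conversion from an HxWxC uint8 image to a (C,H,W) float tensor in [0,1] looks like this:

import numpy as np
import torch

def to_tensor(image):
    # HWC uint8 -> CHW float32 in [0, 1]
    tensor = torch.from_numpy(image.astype(np.float32) / 255.0)
    return tensor.permute(2, 0, 1).contiguous()

image = np.zeros((240, 320, 3), dtype=np.uint8)
print(to_tensor(image).shape)   # torch.Size([3, 240, 320])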
Example #4
Source File: test_gradients.py    From CrypTen with MIT License
def _set_grad_to_zero(self, args, make_private=False):
        """Sets gradients for args to zero

        Args:
            args (list of torch.tensors): contains arguments
            make_private (bool): encrypt args using CrypTensor
        """
        args_zero_grad = []

        for arg in args:
            if is_float_tensor(arg) and make_private:
                arg = crypten.cryptensor(arg, requires_grad=True)
            elif is_float_tensor(arg):
                arg.requires_grad = True
                arg.grad = None

            args_zero_grad.append(arg)

        return args_zero_grad 
Example #5
Source File: test_gradients.py    From CrypTen with MIT License
def _reductions_helper(self, input_reductions, method=None):
        """Tests input reductions on tensors of various sizes."""
        for size in SIZES:
            tensor = get_random_test_tensor(size=size, is_float=True)
            for reduction in input_reductions:
                if method is None:
                    self._check_forward_backward(reduction, tensor)
                else:
                    with crypten.mpc.ConfigManager("max_method", method):
                        self._check_forward_backward(reduction, tensor)

                # Check dim 0 if tensor is 0-dimensional
                dims = 1 if tensor.dim() == 0 else tensor.dim()
                for dim in range(dims):
                    for keepdim in [False, True]:
                        if method is None:
                            self._check_forward_backward(
                                reduction, tensor, dim, keepdim=keepdim
                            )
                        else:
                            with crypten.mpc.ConfigManager("max_method", method):
                                self._check_forward_backward(
                                    reduction, tensor, dim, keepdim=keepdim
                                ) 
Example #6
Source File: circular_replay_buffer.py    From ReAgent with BSD 3-Clause "New" or "Revised" License
def _add(self, *args, **kwargs):
        """Internal add method to add to the storage arrays.
        Args:
          *args: All the elements in a transition.
        """
        self._check_args_length(*args, **kwargs)
        elements = self.get_add_args_signature()
        # convert kwarg np.arrays to torch.tensors
        for element in elements[len(args) :]:
            if element.name in kwargs:
                kwargs[element.name] = torch.from_numpy(
                    np.array(kwargs[element.name], dtype=element.type)
                )
        # convert arg np.arrays to torch.tensors
        kwargs.update(
            {
                e.name: torch.from_numpy(np.array(arg, dtype=e.type))
                for arg, e in zip(args, elements[: len(args)])
            }
        )
        self._add_transition(kwargs) 
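A note on the conversion used above: torch.from_numpy() shares memory with the source array, while torch.tensor() makes an independent copy. A quick sketch:

import numpy as np
import torch

arr = np.zeros(3, dtype=np.float32)
shared = torch.from_numpy(arr)   # no copy; views the same buffer
copied = torch.tensor(arr)       # independent copy

arr[0] = 1.0
print(shared[0].item())   # 1.0 -- reflects the change
print(copied[0].item())   # 0.0 -- unaffected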
Example #7
Source File: test_gradients.py    From CrypTen with MIT License
def _conv1d(self, signal_size, in_channels):
        """Test convolution of encrypted tensor with public/private tensors."""
        nbatches = [1, 3]
        nout_channels = [1, 5]
        kernel_sizes = [1, 2, 3]
        paddings = [0, 1]
        strides = [1, 2]

        for batches in nbatches:
            size = (batches, in_channels, signal_size)
            signal = get_random_test_tensor(size=size, is_float=True)

            for kernel_size, out_channels in itertools.product(
                kernel_sizes, nout_channels
            ):
                kernel_size = (out_channels, in_channels, kernel_size)
                kernel = get_random_test_tensor(size=kernel_size, is_float=True)

                for padding in paddings:
                    for stride in strides:
                        self._check_forward_backward(
                            "conv1d", signal, kernel, stride=stride, padding=padding
                        ) 
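For reference, the plaintext operation being tested follows the same shape conventions as torch.nn.functional.conv1d: input (batch, in_channels, length) and weight (out_channels, in_channels, kernel_size). A small shape sketch:

import torch
import torch.nn.functional as F

signal = torch.randn(3, 2, 10)    # (batches, in_channels, signal_size)
kernel = torch.randn(5, 2, 3)     # (out_channels, in_channels, kernel_size)
out = F.conv1d(signal, kernel, stride=2, padding=1)
print(out.shape)                  # torch.Size([3, 5, 5])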
Example #8
Source File: distance.py    From heat with MIT License
def _gaussian_fast(x, y, sigma=1.0):
    """
    Helper function to calculate the Gaussian distance between torch.tensors x and y: exp(-|x - y|**2 / (2 * sigma**2))
    Uses quadratic expansion to calculate (x - y)**2

    Parameters
    ----------
    x : torch.tensor
        2D tensor of size m x f
    y : torch.tensor
        2D tensor of size n x f
    sigma: float, default=1.0
        scaling factor for gaussian kernel

    Returns
    -------
    torch.tensor
        2D tensor of size m x n
    """

    d2 = _quadratic_expand(x, y)
    result = torch.exp(-d2 / (2 * sigma * sigma))
    return result 
Example #9
Source File: distance.py    From heat with MIT License
def _gaussian(x, y, sigma=1.0):
    """
    Helper function to calculate the Gaussian distance between torch.tensors x and y: exp(-|x - y|**2 / (2 * sigma**2))
    Based on torch.cdist

    Parameters
    ----------
    x : torch.tensor
        2D tensor of size m x f
    y : torch.tensor
        2D tensor of size n x f
    sigma: float, default=1.0
        scaling factor for gaussian kernel

    Returns
    -------
    torch.tensor
        2D tensor of size m x n
    """
    d2 = _euclidian(x, y) ** 2
    result = torch.exp(-d2 / (2 * sigma * sigma))
    return result 
Example #10
Source File: distance.py    From heat with MIT License
def _euclidian(x, y):
    """
    Helper function to calculate the Euclidean distance between torch.tensors x and y: sqrt(|x - y|**2)
    Based on torch.cdist

    Parameters
    ----------
    x : torch.tensor
        2D tensor of size m x f
    y : torch.tensor
        2D tensor of size n x f

    Returns
    -------
    torch.tensor
        2D tensor of size m x n
    """
    return torch.cdist(x, y) 
Example #11
Source File: distance.py    From heat with MIT License
def _euclidian_fast(x, y):
    """
    Helper function to calculate the Euclidean distance between torch.tensors x and y: sqrt(|x - y|**2)
    Uses quadratic expansion to calculate (x-y)**2

    Parameters
    ----------
    x : torch.tensor
        2D tensor of size m x f
    y : torch.tensor
        2D tensor of size n x f

    Returns
    -------
    torch.tensor
        2D tensor of size m x n
    """
    return torch.sqrt(_quadratic_expand(x, y)) 
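_quadratic_expand is not shown in these excerpts; below is a hedged sketch of the standard quadratic expansion it presumably implements, |x - y|**2 = |x|**2 - 2 * x @ y.T + |y|**2, computed pairwise over the rows of x and y.

import torch

def quadratic_expand(x, y):
    # Pairwise squared Euclidean distances between the rows of x (m x f) and y (n x f).
    x_norm = (x * x).sum(dim=1, keepdim=True)        # m x 1
    y_norm = (y * y).sum(dim=1, keepdim=True).t()    # 1 x n
    d2 = x_norm + y_norm - 2.0 * x @ y.t()           # m x n, may dip slightly below zero
    return d2.clamp_(min=0.0)

x = torch.randn(4, 3)
y = torch.randn(5, 3)
assert torch.allclose(quadratic_expand(x, y).sqrt(), torch.cdist(x, y), atol=1e-5)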
Example #12
Source File: adaptation_networks.py    From cnaps with MIT License
def forward(self, representation_dict):
        """
        Forward pass through adaptation network. Returns classification parameters for task.
        :param representation_dict: (dict::torch.tensors) Dictionary containing class-level representations for each
                                    class in the task.
        :return: (dict::torch.tensors) Dictionary containing the weights and biases for the classification of each class
                 in the task. Model can extract parameters and build the classifier accordingly. Supports sampling if
                 ML-PIP objective is desired.
        """
        classifier_param_dict = {}
        class_weight_means = []
        class_bias_means = []

        # Extract and sort the label set for the task
        label_set = list(representation_dict.keys())
        label_set.sort()
        num_classes = len(label_set)

        # For each class, extract the representation and pass it through the adaptation network to generate
        # classification params for that class. Store the parameters in a list.
        for class_num in label_set:
            nu = representation_dict[class_num]
            class_weight_means.append(self.weight_means_processor(nu))
            class_bias_means.append(self.bias_means_processor(nu))

        # Save the parameters as torch tensors (matrix and vector) and add to dictionary
        classifier_param_dict['weight_mean'] = torch.cat(class_weight_means, dim=0)
        classifier_param_dict['bias_mean'] = torch.reshape(torch.cat(class_bias_means, dim=1), [num_classes, ])

        return classifier_param_dict 
Example #13
Source File: circular_replay_buffer.py    From ReAgent with BSD 3-Clause "New" or "Revised" License
def load(self, checkpoint_dir, suffix):
        """Restores the object from bundle_dictionary and numpy checkpoints.
        Args:
          checkpoint_dir: str, the directory from which to read the numpy
            checkpoint files.
          suffix: str, the suffix to use in numpy checkpoint files.
        Raises:
          FileNotFoundError: If not all expected files are found in the directory.
        """
        # TODO: Load tensors from torch files.
        save_elements = self._return_checkpointable_elements()
        # We will first make sure we have all the necessary files available to avoid
        # loading a partially-specified (i.e. corrupted) replay buffer.
        for attr in save_elements:
            filename = self._generate_filename(checkpoint_dir, attr, suffix)
            if not os.path.exists(filename):
                raise FileNotFoundError(None, None, "Missing file: {}".format(filename))
        # If we've reached this point then we have verified that all expected files
        # are available.
        for attr in save_elements:
            filename = self._generate_filename(checkpoint_dir, attr, suffix)
            with open(filename, "rb") as f:
                with gzip.GzipFile(fileobj=f) as infile:
                    if attr.startswith(STORE_FILENAME_PREFIX):
                        array_name = attr[len(STORE_FILENAME_PREFIX) :]
                        self._store[array_name] = torch.from_numpy(
                            np.load(infile, allow_pickle=False)
                        )
                    elif isinstance(self.__dict__[attr], np.ndarray):
                        self.__dict__[attr] = np.load(infile, allow_pickle=False)
                    else:
                        self.__dict__[attr] = pickle.load(infile) 
Example #14
Source File: test_gradients.py    From CrypTen with MIT License
def test_cat_stack(self):
        for func in ["cat", "stack"]:
            for dimensions in range(1, 5):
                size = [5] * dimensions
                for num_tensors in range(1, 5):
                    for dim in range(dimensions):
                        tensors = [
                            get_random_test_tensor(size=size, is_float=True)
                            for _ in range(num_tensors)
                        ]
                        encrypted_tensors = [
                            crypten.cryptensor(t, requires_grad=True) for t in tensors
                        ]
                        for i in range(len(tensors)):
                            tensors[i].grad = None
                            tensors[i].requires_grad = True
                            encrypted_tensors[i].grad = None
                            encrypted_tensors[i].requires_grad = True

                        # Forward
                        reference = getattr(torch, func)(tensors, dim=dim)
                        encrypted_out = getattr(crypten, func)(
                            encrypted_tensors, dim=dim
                        )
                        self._check(encrypted_out, reference, f"{func} forward failed")

                        # Backward
                        grad_output = get_random_test_tensor(
                            size=reference.size(), is_float=True
                        )
                        encrypted_grad_output = crypten.cryptensor(grad_output)

                        reference.backward(grad_output)
                        encrypted_out.backward(encrypted_grad_output)
                        for i in range(len(tensors)):
                            self._check(
                                encrypted_tensors[i].grad,
                                tensors[i].grad,
                                f"{func} backward failed",
                            ) 
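For context on the two ops exercised above: torch.cat joins tensors along an existing dimension, while torch.stack inserts a new one. A quick shape check:

import torch

tensors = [torch.randn(5, 5) for _ in range(3)]
print(torch.cat(tensors, dim=0).shape)     # torch.Size([15, 5])
print(torch.stack(tensors, dim=0).shape)   # torch.Size([3, 5, 5])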
Example #15
Source File: test_gradients.py    From CrypTen with MIT License
def test_clone(self):
        """Tests shallow_copy and clone of encrypted tensors."""
        sizes = [(5,), (1, 5), (5, 10, 15)]
        for size in sizes:
            tensor = get_random_test_tensor(size=size, is_float=True)
            self._check_forward_backward("clone", tensor) 
Example #16
Source File: test_gradients.py    From CrypTen with MIT License
def test_dot_ger(self):
        """Test inner and outer products of encrypted tensors."""
        for length in range(1, 10):
            tensor1 = get_random_test_tensor(size=(length,), is_float=True)
            tensor2 = get_random_test_tensor(size=(length,), is_float=True)

            self._check_forward_backward("dot", tensor1, tensor2)
            self._check_forward_backward("ger", tensor1, tensor2) 
Example #17
Source File: test_gradients.py    From CrypTen with MIT License
def test_unary_functions(self):
        """Test unary functions on tensors of various sizes."""
        unary_functions = [
            "neg",
            "__neg__",
            "exp",
            "reciprocal",
            "abs",
            "__abs__",
            "sign",
            "relu",
            "sin",
            "cos",
            "sigmoid",
            "tanh",
            "log",
            "sqrt",
        ]
        pos_only_functions = ["log", "sqrt"]
        for func in unary_functions:
            for size in SIZES:
                tensor = get_random_test_tensor(size=size, is_float=True)

                # Make tensor positive when positive inputs are required
                if func in pos_only_functions:
                    tensor = tensor.abs()

                self._check_forward_backward(func, tensor) 
Example #18
Source File: segmentation_mask.py    From EmbedMask with MIT License
def __init__(self, masks, size):
        """
            Arguments:
                masks: Either torch.tensor of [num_instances, H, W]
                    or list of torch.tensors of [H, W] with num_instances elems,
                    or RLE (Run Length Encoding) - interpreted as list of dicts,
                    or BinaryMaskList.
                size: absolute image size, width first

            After initialization, a hard copy will be made, to leave the
            initializing source data intact.
        """

        if isinstance(masks, torch.Tensor):
            # The raw data representation is passed as argument
            masks = masks.clone()
        elif isinstance(masks, (list, tuple)):
            if isinstance(masks[0], torch.Tensor):
                masks = torch.stack(masks, dim=2).clone()
            elif isinstance(masks[0], dict) and "counts" in masks[0]:
                # RLE interpretation
                masks = mask_utils.decode(masks)  # [h, w, n]
                masks = torch.tensor(masks).permute(2, 0, 1)  # [n, h, w]
            else:
                raise RuntimeError(
                    "Type of `masks[0]` could not be interpreted: %s" % type(masks)
                )
        elif isinstance(masks, BinaryMaskList):
            # just hard copy the BinaryMaskList instance's underlying data
            masks = masks.masks.clone()
        else:
            raise RuntimeError(
                "Type of `masks` argument could not be interpreted: %s" % type(masks)
            )

        if len(masks.shape) == 2:
            # if only a single instance mask is passed
            masks = masks[None]

        assert len(masks.shape) == 3
        assert masks.shape[1] == size[1], "%s != %s" % (masks.shape[1], size[1])
        assert masks.shape[2] == size[0], "%s != %s" % (masks.shape[2], size[0])

        self.masks = masks
        self.size = tuple(size) 
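The RLE branch relies on pycocotools-style mask utilities; below is a hedged sketch of round-tripping a small binary mask through RLE encoding and decoding (the mask_utils alias mirrors the convention in these files).

import numpy as np
import pycocotools.mask as mask_utils

mask = np.zeros((4, 6), dtype=np.uint8)
mask[1:3, 2:5] = 1

rle = mask_utils.encode(np.asfortranarray(mask))   # dict with "size" and "counts"
decoded = mask_utils.decode(rle)                   # back to an (h, w) uint8 array
assert (decoded == mask).all()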
Example #19
Source File: circular_replay_buffer.py    From ReAgent with BSD 3-Clause "New" or "Revised" License
def save(self, checkpoint_dir, iteration_number):
        """Save the ReplayBuffer attributes into a file.
        This method will save all the replay buffer's state in a single file.
        Args:
          checkpoint_dir: str, the directory where numpy checkpoint files should be
            saved.
          iteration_number: int, iteration_number to use as a suffix in naming
            numpy checkpoint files.
        """
        # TODO: Save tensors to torch files.
        if not os.path.exists(checkpoint_dir):
            return

        checkpointable_elements = self._return_checkpointable_elements()

        for attr in checkpointable_elements:
            filename = self._generate_filename(checkpoint_dir, attr, iteration_number)
            with open(filename, "wb") as f:
                with gzip.GzipFile(fileobj=f) as outfile:
                    # Checkpointing the np arrays in self._store with np.save instead of
                    # pickling the dictionary is critical for file size and performance.
                    # STORE_FILENAME_PREFIX indicates that the variable is contained in
                    # self._store.
                    if attr.startswith(STORE_FILENAME_PREFIX):
                        array_name = attr[len(STORE_FILENAME_PREFIX) :]
                        np.save(
                            outfile, self._store[array_name].numpy(), allow_pickle=False
                        )
                    # Some numpy arrays might not be part of storage
                    elif isinstance(self.__dict__[attr], np.ndarray):
                        np.save(outfile, self.__dict__[attr], allow_pickle=False)
                    else:
                        pickle.dump(self.__dict__[attr], outfile)

            # After writing a checkpoint file, we garbage collect the checkpoint file
            # that is four versions old.
            stale_iteration_number = iteration_number - CHECKPOINT_DURATION
            if stale_iteration_number >= 0:
                stale_filename = self._generate_filename(
                    checkpoint_dir, attr, stale_iteration_number
                )
                try:
                    os.remove(stale_filename)
                except FileNotFoundError:
                    pass 
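The checkpoint format above is simply gzip-wrapped np.save / pickle; a minimal round-trip sketch (the file name is illustrative):

import gzip
import numpy as np
import torch

array = torch.arange(10).numpy()
with open("store_example.gz", "wb") as f:
    with gzip.GzipFile(fileobj=f) as outfile:
        np.save(outfile, array, allow_pickle=False)

with open("store_example.gz", "rb") as f:
    with gzip.GzipFile(fileobj=f) as infile:
        restored = torch.from_numpy(np.load(infile, allow_pickle=False))
print(restored)   # tensor([0, 1, 2, ..., 9])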
Example #20
Source File: segmentation_mask.py    From Parsing-R-CNN with MIT License
def __init__(self, masks, size):
        """
            Arguments:
                masks: Either torch.tensor of [num_instances, H, W]
                    or list of torch.tensors of [H, W] with num_instances elems,
                    or RLE (Run Length Encoding) - interpreted as list of dicts,
                    or BinaryMaskList.
                size: absolute image size, width first

            After initialization, a hard copy will be made, to leave the
            initializing source data intact.
        """

        if isinstance(masks, torch.Tensor):
            # The raw data representation is passed as argument
            masks = masks.clone()
        elif isinstance(masks, (list, tuple)):
            if isinstance(masks[0], torch.Tensor):
                masks = torch.stack(masks, dim=2).clone()
            elif isinstance(masks[0], dict) and "counts" in masks[0]:
                # RLE interpretation
                assert all(
                    [(size[1], size[0]) == tuple(inst["size"]) for inst in masks]
                )  # in RLE, height comes first in "size"
                masks = mask_utils.decode(masks)  # [h, w, n]
                masks = torch.tensor(masks).permute(2, 0, 1)  # [n, h, w]
            else:
                raise RuntimeError(
                    "Type of `masks[0]` could not be interpreted: %s" % type(masks)
                )
        elif isinstance(masks, BinaryMaskList):
            # just hard copy the BinaryMaskList instance's underlying data
            masks = masks.masks.clone()
        else:
            raise RuntimeError(
                "Type of `masks` argument could not be interpreted: %s" % type(masks)
            )

        if len(masks.shape) == 2:
            # if only a single instance mask is passed
            masks = masks[None]

        assert len(masks.shape) == 3
        assert masks.shape[1] == size[1], "%s != %s" % (masks.shape[1], size[1])
        assert masks.shape[2] == size[0], "%s != %s" % (masks.shape[2], size[0])

        self.masks = masks
        self.size = tuple(size) 
Example #21
Source File: segmentation_mask.py    From DF-Traffic-Sign-Identification with MIT License
def __init__(self, masks, size):
        """
            Arguments:
                masks: Either torch.tensor of [num_instances, H, W]
                    or list of torch.tensors of [H, W] with num_instances elems,
                    or RLE (Run Length Encoding) - interpreted as list of dicts,
                    or BinaryMaskList.
                size: absolute image size, width first

            After initialization, a hard copy will be made, to leave the
            initializing source data intact.
        """

        if isinstance(masks, torch.Tensor):
            # The raw data representation is passed as argument
            masks = masks.clone()
        elif isinstance(masks, (list, tuple)):
            if isinstance(masks[0], torch.Tensor):
                masks = torch.stack(masks, dim=2).clone()
            elif isinstance(masks[0], dict) and "counts" in masks[0]:
                # RLE interpretation
                assert all(
                    [(size[1], size[0]) == tuple(inst["size"]) for inst in masks]
                )  # in RLE, height comes first in "size"
                masks = mask_utils.decode(masks)  # [h, w, n]
                masks = torch.tensor(masks).permute(2, 0, 1)  # [n, h, w]
            else:
                raise RuntimeError(
                    "Type of `masks[0]` could not be interpreted: %s" % type(masks)
                )
        elif isinstance(masks, BinaryMaskList):
            # just hard copy the BinaryMaskList instance's underlying data
            masks = masks.masks.clone()
        else:
            raise RuntimeError(
                "Type of `masks` argument could not be interpreted: %s" % type(masks)
            )

        if len(masks.shape) == 2:
            # if only a single instance mask is passed
            masks = masks[None]

        assert len(masks.shape) == 3
        assert masks.shape[1] == size[1], "%s != %s" % (masks.shape[1], size[1])
        assert masks.shape[2] == size[0], "%s != %s" % (masks.shape[2], size[0])

        self.masks = masks
        self.size = tuple(size)