Python torch.short() Examples

The following are 6 code examples of torch.short, PyTorch's alias for the 16-bit signed integer dtype torch.int16 (the corresponding Tensor.short() method casts a tensor to that dtype). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
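
Before turning to the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what torch.short denotes and how the Tensor.short() cast behaves:

import torch

# torch.short is an alias for the 16-bit signed integer dtype torch.int16.
assert torch.short is torch.int16

# Tensor.short() casts a tensor to int16, equivalent to .to(torch.int16).
x = torch.tensor([1.7, 2.3])
assert x.short().dtype == torch.int16
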
Example #1
Source File: types.py    From chainer-compiler with MIT License
import numpy as np
import torch


def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
            torch.bool    : np.dtype(np.bool_),
            torch.uint8   : np.dtype(np.uint8),
            torch.int8    : np.dtype(np.int8),
            torch.int16   : np.dtype(np.int16),
            torch.short   : np.dtype(np.int16),
            torch.int32   : np.dtype(np.int32),
            torch.int     : np.dtype(np.int32),
            torch.int64   : np.dtype(np.int64),
            torch.long    : np.dtype(np.int64),
            torch.float16 : np.dtype(np.float16),
            torch.half    : np.dtype(np.float16),
            torch.float32 : np.dtype(np.float32),
            torch.float   : np.dtype(np.float32),
            torch.float64 : np.dtype(np.float64),
            torch.double  : np.dtype(np.float64),
            }
    return dtype_dict[dtype]
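
A short usage sketch for the mapping above (not part of the original chainer-compiler file), relying only on entries shown in the dictionary:

assert torch_dtype_to_np_dtype(torch.short) == np.dtype(np.int16)
assert torch_dtype_to_np_dtype(torch.half) == np.dtype(np.float16)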


Example #2
Source File: test_types.py    From heat with MIT License
def test_canonical_heat_type(self):
        self.assertEqual(ht.core.types.canonical_heat_type(ht.float32), ht.float32)
        self.assertEqual(ht.core.types.canonical_heat_type("?"), ht.bool)
        self.assertEqual(ht.core.types.canonical_heat_type(int), ht.int32)
        self.assertEqual(ht.core.types.canonical_heat_type("u1"), ht.uint8)
        self.assertEqual(ht.core.types.canonical_heat_type(np.int8), ht.int8)
        self.assertEqual(ht.core.types.canonical_heat_type(torch.short), ht.int16)

        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type({})
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type(object)
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type(1)
        with self.assertRaises(TypeError):
            ht.core.types.canonical_heat_type("i7") 
Example #3
Source File: test_types.py    From heat with MIT License
def test_int16(self):
        self.assert_is_instantiable_heat_type(ht.int16, torch.int16)
        self.assert_is_instantiable_heat_type(ht.short, torch.int16) 
Example #4
Source File: tensor.py    From pytorch_sparse with MIT License
def short(self):
        return self.type_as(
            torch.tensor(0, dtype=torch.short, device=self.device())) 
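
The type_as pattern above is not specific to pytorch_sparse; a minimal sketch of the same cast applied to a dense tensor:

import torch

x = torch.arange(4, dtype=torch.float32)
# type_as casts x to the dtype of the zero-dim torch.short reference tensor.
y = x.type_as(torch.tensor(0, dtype=torch.short))
assert y.dtype == torch.int16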
Example #5
Source File: technology.py    From torchfunc with MIT License
def __init__(
        self,
        linear_types=(torch.nn.Linear, torch.nn.Bilinear,),
        convolution_types=(torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d,),
        linear_inputs=None,
        linear_outputs=None,
        convolution_inputs=None,
        convolution_outputs=None,
        float_types=(torch.half,),
        integer_types=(torch.short,),
    ):

        self.linear_types = linear_types
        self.convolution_types = convolution_types
        if linear_inputs is None:
            self.linear_inputs = collections.defaultdict(lambda: ("in_features",))
            self.linear_inputs[torch.nn.Bilinear] = ("in_features1", "in_features2")
        else:
            self.linear_inputs = linear_inputs
        if linear_outputs is None:
            self.linear_outputs = collections.defaultdict(lambda: ("out_features",))
        else:
            self.linear_outputs = linear_outputs
        if convolution_inputs is None:
            self.convolution_inputs = collections.defaultdict(lambda: ("in_channels",))
        else:
            self.convolution_inputs = convolution_inputs
        if convolution_outputs is None:
            self.convolution_outputs = collections.defaultdict(
                lambda: ("out_channels",)
            )
        else:
            self.convolution_outputs = convolution_outputs
        self.float_types = float_types
        self.integer_types = integer_types 
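
The defaults above rely on collections.defaultdict so that layer types without an explicit entry fall back to a common attribute name; a minimal sketch of that lookup behaviour (independent of torchfunc):

import collections
import torch

linear_inputs = collections.defaultdict(lambda: ("in_features",))
linear_inputs[torch.nn.Bilinear] = ("in_features1", "in_features2")

# Any linear type without an explicit entry falls back to the default tuple.
assert linear_inputs[torch.nn.Linear] == ("in_features",)
assert linear_inputs[torch.nn.Bilinear] == ("in_features1", "in_features2")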
Example #6
Source File: technology.py    From torchfunc with MIT License
def tips(self, module: torch.nn.Module) -> str:
        r"""**Return** `str` **representation of** `modules()` **method.**

        It is advised to use this function to get tips in order to easily fix
        possible performance issues related to Tensor Cores.

        Parameters
        ----------
        module : torch.nn.Module
                Module to be scanned

        Returns
        -------
        str
                String representing tips related to Tensor Cores.
        """
        data = self.modules(module)

        def types():
            _types = data["type"]

            def parse_type(is_float: bool, goal):
                key = "float" if is_float else "integer"
                if _types[key]:
                    return "\nModules where {} type is not {}:\n".format(
                        key, goal
                    ) + str(_types[key])
                return ""

            return parse_type(True, "torch.half") + parse_type(False, "torch.short")

        def shape():
            def parse_shape(dictionary, is_input: bool, goal):
                key = "inputs" if is_input else "outputs"
                if dictionary[key]:
                    return "\nModules where {} shape should be divisible by {}:\n".format(
                        key, goal
                    ) + str(
                        dictionary[key]
                    )
                return ""

            _shapes = data["shape"]

            def floating():
                _floats = _shapes["float"]
                return parse_shape(_floats, True, 8) + parse_shape(_floats, False, 8)

            def integer():
                _integers = _shapes["integer"]
                return parse_shape(_integers, True, 16) + parse_shape(
                    _integers, False, 16
                )

            return floating() + integer()

        output = types() + shape()
        if output != "":
            output = "TensorCores incompatible modules:" + output
        return output
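
The report above flags modules whose floating-point type is not torch.half or whose integer type is not torch.short. Below is a minimal sketch of that dtype check applied to a module's parameters; uses_tensor_core_dtypes is a hypothetical helper, not part of the torchfunc API:

import torch

def uses_tensor_core_dtypes(module: torch.nn.Module) -> bool:
    # Hypothetical helper: True only if every floating parameter is torch.half
    # and every integer parameter is torch.short (the goals named in tips()).
    for parameter in module.parameters():
        if parameter.is_floating_point():
            if parameter.dtype != torch.half:
                return False
        elif parameter.dtype != torch.short:
            return False
    return True

print(uses_tensor_core_dtypes(torch.nn.Linear(8, 8).half()))  # True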