Python torch.bfloat16() Examples

The following are 9 code examples of torch.bfloat16(), collected from open-source projects. The original project and source file are noted above each example; you may also want to look at the other available functions and classes of the torch module.
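
Despite the call-style title, torch.bfloat16 is a dtype object rather than a function. As a quick orientation, here is a minimal, self-contained sketch of how it is typically used (assuming a PyTorch version with CPU bfloat16 support):

import torch

# Create a float32 tensor and cast it to bfloat16.
x = torch.randn(4, 4)
x_bf16 = x.to(dtype=torch.bfloat16)

print(x_bf16.dtype)           # torch.bfloat16
print(x_bf16.element_size())  # 2 bytes per element

# Cast back to float32 before handing the data to libraries (e.g. NumPy)
# that do not understand bfloat16.
x_back = x_bf16.float()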
Example #1
Source File: utils.py    From fairseq with MIT License
def move_to_cpu(sample):
    def _move_to_cpu(tensor):
        # PyTorch has poor support for half tensors (float16) on CPU.
        # Move any such tensors to float32.
        if tensor.dtype in {torch.bfloat16, torch.float16}:
            tensor = tensor.to(dtype=torch.float32)
        return tensor.cpu()

    return apply_to_sample(_move_to_cpu, sample) 
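
For readers without fairseq installed, here is a standalone sketch of the same idea; the recursive move_to_cpu below is hypothetical and only mimics what fairseq's apply_to_sample does for dicts and lists:

import torch

def _move_to_cpu(tensor):
    # float16/bfloat16 have limited CPU kernel coverage, so upcast them
    # to float32 before moving off the GPU.
    if tensor.dtype in {torch.bfloat16, torch.float16}:
        tensor = tensor.to(dtype=torch.float32)
    return tensor.cpu()

def move_to_cpu(sample):
    # Hypothetical stand-in for fairseq's apply_to_sample: walk dicts and
    # lists and apply _move_to_cpu to every tensor leaf.
    if torch.is_tensor(sample):
        return _move_to_cpu(sample)
    if isinstance(sample, dict):
        return {k: move_to_cpu(v) for k, v in sample.items()}
    if isinstance(sample, (list, tuple)):
        return type(sample)(move_to_cpu(v) for v in sample)
    return sample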
Example #2
Source File: trainer.py    From fairseq with MIT License
def _prepare_sample(self, sample):
        if sample == "DUMMY":
            raise Exception(
                "Trying to use an uninitialized 'dummy' batch. This usually indicates "
                "that the total number of batches is smaller than the number of "
                "participating GPUs. Try reducing the batch size or using fewer GPUs."
            )

        if sample is None or len(sample) == 0:
            return None

        if self.cuda:
            sample = utils.move_to_cuda(sample)

        def apply_half(t):
            if t.dtype is torch.float32:
                return t.half()
            return t

        def apply_bfloat16(t):
            if t.dtype is torch.float32:
                return t.to(dtype=torch.bfloat16)
            return t

        if self.args.fp16:
            sample = utils.apply_to_sample(apply_half, sample)

        if self.args.bf16:
            sample = utils.apply_to_sample(apply_bfloat16, sample)

        return sample 
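
The check-and-convert pattern above can be exercised outside the trainer as well; the batch layout in this sketch is made up purely for illustration:

import torch

def apply_bfloat16(t):
    # Only downcast full-precision floats; integer tensors such as token
    # ids are left untouched.
    if t.dtype is torch.float32:
        return t.to(dtype=torch.bfloat16)
    return t

batch = {
    "tokens": torch.randint(0, 100, (8, 16)),  # int64, stays int64
    "features": torch.randn(8, 16, 32),        # float32 -> bfloat16
}
batch = {k: apply_bfloat16(v) for k, v in batch.items()}
print({k: v.dtype for k, v in batch.items()})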
Example #3
Source File: test_protobuf_serde.py    From PySyft with Apache License 2.0
def test_protobuf_serde_tensor_roundtrip(str_dtype):
    """Checks that tensors passed through serialization-deserialization stay same"""

    def compare(roundtrip, original):
        assert type(roundtrip) == torch.Tensor
        assert roundtrip.dtype == original.dtype

        # PyTorch doesn't implement equality checking for bfloat16, so convert to float
        if original.dtype == torch.bfloat16:
            roundtrip = roundtrip.float()
            original = original.float()

        # PyTorch doesn't implement equality checking for float16, so use numpy
        assert numpy.array_equal(roundtrip.data.numpy(), original.data.numpy())
        return True

    serde_worker = syft.hook.local_worker
    original_framework = serde_worker.framework
    serde_worker.framework = None

    tensor = torch.rand([10, 10]) * 16
    tensor = tensor.to(TORCH_STR_DTYPE[str_dtype])

    protobuf_tensor = protobuf.serde._bufferize(serde_worker, tensor)
    roundtrip_tensor = protobuf.serde._unbufferize(serde_worker, protobuf_tensor)

    serde_worker.framework = original_framework

    assert compare(roundtrip_tensor, tensor) is True


# quantized types can't be created by conversion with `tensor.to()` 
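
The float upcast in compare() matters because, in the PyTorch versions this test targets, bfloat16 tensors cannot be handed to NumPy directly. A minimal sketch of the comparison trick on its own:

import numpy
import torch

a = torch.rand(3, 3).to(torch.bfloat16)
b = a.clone()

# a.numpy() fails for bfloat16 in many PyTorch builds, so upcast both
# sides to float32 before comparing element-wise.
assert numpy.array_equal(a.float().numpy(), b.float().numpy())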
Example #4
Source File: debug_lm.py    From ru_transformers with Apache License 2.0
def bhalf(module):
    return module._apply(lambda t: t.to(torch.bfloat16) if t.is_floating_point() else t) 
Example #5
Source File: tpu_lm_finetuning.py    From ru_transformers with Apache License 2.0
def bhalf(module):
    return module._apply(lambda t: t.to(torch.bfloat16) if t.is_floating_point() else t) 
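
Examples #4 and #5 define the same helper. A usage sketch follows; the model is a throwaway stand-in, and note that nn.Module._apply is a private API, with module.to(torch.bfloat16) being the public equivalent for casting floating-point parameters and buffers:

import torch
import torch.nn as nn

def bhalf(module):
    return module._apply(lambda t: t.to(torch.bfloat16) if t.is_floating_point() else t)

# Throwaway model purely for illustration.
model = nn.Sequential(nn.Linear(16, 8), nn.ReLU(), nn.Linear(8, 1))
model = bhalf(model)
print(next(model.parameters()).dtype)  # torch.bfloat16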
Example #6
Source File: tensor.py    From pytorch_sparse with MIT License
def bfloat16(self):
        return self.type_as(
            torch.tensor(0, dtype=torch.bfloat16, device=self.device())) 
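
The type_as trick used here is plain PyTorch; a small sketch on a dense tensor (without pytorch_sparse) shows what it does:

import torch

x = torch.randn(4, 4)  # float32
ref = torch.tensor(0, dtype=torch.bfloat16)

# type_as casts x to the dtype (and device) of the reference tensor,
# which is how the bfloat16 method above converts its values.
x_bf16 = x.type_as(ref)
print(x_bf16.dtype)  # torch.bfloat16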
Example #7
Source File: pytorch.py    From incubator-tvm with Apache License 2.0
def _pytorch_result_type(dtypes, non_tensor_inputs):
    """This promotes TVM dtypes like PyTorch would"""
    import torch
    dtype_map = {
        "float64": torch.float64,
        "float32": torch.float32,
        "float16": torch.float16,
        "bfloat16": torch.bfloat16,
        "int64": torch.int64,
        "int32": torch.int32,
        "int16": torch.int16,
        "int8": torch.int8,
        "uint8": torch.uint8,
        "bool": torch.bool
        }
    if len(dtypes) > 0:
        result_type = dtypes[0]
        for dt in dtypes[1:]:
            if dt != result_type: # we don't want to work with same types as we
                                  # don't do quantized here (which cannot be promoted?)
                result_type = _convert_data_type(str(torch.result_type(
                    torch.zeros((), dtype=dtype_map[result_type]),
                    torch.zeros((), dtype=dtype_map[dt]))))
    else:
        result_type = "bool"  # this is the smallest type...
    for inp in non_tensor_inputs:
        result_type = _convert_data_type(
            str(torch.result_type(torch.zeros((), dtype=dtype_map[result_type]),
                                  inp)))
    return result_type 
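
The promotion rules this helper mirrors come from torch.result_type. A quick sketch of how bfloat16 participates; the commented results follow PyTorch's documented promotion table, so treat them as illustrative:

import torch

bf16 = torch.zeros((), dtype=torch.bfloat16)
f16 = torch.zeros((), dtype=torch.float16)
i64 = torch.zeros((), dtype=torch.int64)

print(torch.result_type(bf16, f16))  # bfloat16 + float16 promote to float32
print(torch.result_type(bf16, i64))  # integer dtypes defer to the float dtype: bfloat16
print(torch.result_type(bf16, 2.0))  # a Python float scalar keeps bfloat16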
Example #8
Source File: nag.py    From fairseq with MIT License
def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            lr = group['lr']
            lr_old = group.get('lr_old', lr)
            lr_correct = lr / lr_old

            for p in group['params']:
                if p.grad is None:
                    continue

                p_data_fp32 = p.data
                if p_data_fp32.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                d_p = p.grad.data.float()
                param_state = self.state[p]
                if 'momentum_buffer' not in param_state:
                    param_state['momentum_buffer'] = torch.zeros_like(d_p)
                else:
                    param_state['momentum_buffer'] = param_state['momentum_buffer'].to(d_p)

                buf = param_state['momentum_buffer']

                if weight_decay != 0:
                    p_data_fp32.mul_(1 - lr * weight_decay)
                p_data_fp32.add_(buf, alpha=momentum * momentum * lr_correct)
                p_data_fp32.add_(d_p, alpha=-(1 + momentum) * lr)

                buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr)

                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)

            group['lr_old'] = lr

        return loss 
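
The upcast-update-copy-back dance around p_data_fp32 is the part of this optimizer that matters for bfloat16. A stripped-down sketch of just that pattern, with an invented toy parameter and update rule:

import torch

# Toy bfloat16 "parameter" and gradient, for illustration only.
p = torch.randn(4, dtype=torch.bfloat16)
grad = torch.randn(4, dtype=torch.bfloat16)
lr = 0.1

# 1) Upcast to float32 so the arithmetic runs at full precision.
p_fp32 = p.float()
# 2) Apply the update in float32.
p_fp32.add_(grad.float(), alpha=-lr)
# 3) Copy the result back into the low-precision storage.
p.copy_(p_fp32)
print(p.dtype)  # torch.bfloat16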
Example #9
Source File: dlrm_s_pytorch.py    From optimized-models with Apache License 2.0
def sequential_forward(self, dense_x, lS_o, lS_i):
        # process dense features (using bottom mlp), resulting in a row vector
        if self.bf16:
            dense_x = dense_x.to_mkldnn(torch.bfloat16)
        elif self.fp32:
            dense_x = dense_x.to_mkldnn()

        x = self.apply_mlp(dense_x, self.bot_l)

        if self.bf16:
            x = x.to_dense(torch.float)
        elif self.fp32:
            x = x.to_dense()
        # debug prints
        # print("intermediate")
        # print(x.detach().cpu().numpy())

        # process sparse features (using embeddings), resulting in a list of row vectors
        ly = self.apply_emb(lS_o, lS_i, self.emb_l)
        # for y in ly:
        #     print(y.detach().cpu().numpy())

        # interact features (dense and sparse)
        z = self.interact_features(x, ly)
        # print(z.detach().cpu().numpy())

        # obtain probability of a click (using top mlp)
        if self.bf16:
            z = z.to_mkldnn(torch.bfloat16)
        elif self.fp32:
            z = z.to_mkldnn()

        p = self.apply_mlp(z, self.top_l)

        if self.bf16:
            p = p.to_dense(torch.float)
        elif self.fp32:
            p = p.to_dense()

        # clamp output if needed
        if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
            z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
        else:
            z = p

        return z
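
The to_mkldnn/to_dense calls above rely on an Intel-optimized PyTorch build. The same cast-in, compute, cast-out structure can be sketched with stock PyTorch, assuming a version with CPU bfloat16 kernels; the tiny MLP here is a stand-in, not DLRM's:

import torch
import torch.nn as nn

bot_mlp = nn.Sequential(nn.Linear(16, 8), nn.ReLU()).to(torch.bfloat16)
dense_x = torch.randn(4, 16)

# Cast the input down to bfloat16, run the MLP, then cast the result back
# to float32 for the parts of the model that expect full precision.
x = bot_mlp(dense_x.to(torch.bfloat16)).to(torch.float)

# Clamp away from exactly 0 and 1, mirroring the loss_threshold handling
# at the end of sequential_forward.
loss_threshold = 1e-7
p = torch.clamp(x, min=loss_threshold, max=1.0 - loss_threshold)
print(p.dtype)  # torch.float32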