Python torch.__version__ Examples

The following are 30 code examples of torch.__version__. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module torch, or try the search function.
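Note that torch.__version__ is a string attribute (e.g. '1.4.0' or '1.5.0+cu101'), not a callable, so the examples below parse it before comparing. A minimal sketch of such a version gate, assuming the third-party packaging library is installed (many of the examples below use distutils.version.LooseVersion for the same purpose):

import torch
from packaging import version

# parse the version string before comparing; plain string comparison is
# lexicographic and mis-orders two-digit components such as '1.10'
if version.parse(torch.__version__) >= version.parse('1.1.0'):
    print('CyclicLR is available in torch.optim.lr_scheduler')
else:
    print('Running an older PyTorch:', torch.__version__)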
Example #1
Source File: trainer.py    From pytorch-fcn with MIT License
def cross_entropy2d(input, target, weight=None, size_average=True):
    # input: (n, c, h, w), target: (n, h, w)
    n, c, h, w = input.size()
    # log_p: (n, c, h, w)
    if LooseVersion(torch.__version__) < LooseVersion('0.3'):
        # ==0.2.X
        log_p = F.log_softmax(input)
    else:
        # >=0.3
        log_p = F.log_softmax(input, dim=1)
    # log_p: (n*h*w, c)
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
    log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
    log_p = log_p.view(-1, c)
    # target: (n*h*w,)
    mask = target >= 0
    target = target[mask]
    loss = F.nll_loss(log_p, target, weight=weight, reduction='sum')
    if size_average:
        loss /= mask.data.sum()
    return loss 
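A sketch of an equivalent on modern PyTorch (>= 0.4), assuming -1 is the only label marking ignored pixels and weight is None (with class weights, reduction='mean' normalises by the summed weights rather than the valid-pixel count):

import torch.nn.functional as F

def cross_entropy2d_modern(input, target, size_average=True):
    # input: (n, c, h, w), target: (n, h, w); pixels labelled -1 are ignored
    reduction = 'mean' if size_average else 'sum'
    return F.cross_entropy(input, target, ignore_index=-1, reduction=reduction)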
Example #2
Source File: wrappers.py    From mmdetection with Apache License 2.0
def forward(self, x):
        if x.numel() == 0 and torch.__version__ <= '1.4.0':
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super(ConvTranspose2d, self).forward(x) 
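Worth noting, here and in Examples #5, #10, #11, and #13: torch.__version__ <= '1.4.0' is a plain lexicographic string comparison, so '1.10.0' would also satisfy it ('1' sorts before '4' at the third character). A sketch of a parsed comparison instead:

import torch
from distutils.version import LooseVersion

# numeric comparison avoids the lexicographic pitfall of raw strings
needs_empty_input_workaround = LooseVersion(torch.__version__) <= LooseVersion('1.4.0')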
Example #3
Source File: torch_scheduler.py    From torchbearer with MIT License
def __init__(self,  base_lr, max_lr, monitor='val_loss', step_size_up=2000, step_size_down=None, mode='triangular',
                 gamma=1., scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9,
                 last_epoch=-1, step_on_batch=False):
        from distutils.version import LooseVersion
        # fall back to a default when torch.__version__ is not a plain string
        version = torch.__version__ if isinstance(torch.__version__, str) else "0.4.0"
        if LooseVersion(version) >= LooseVersion("1.1.0"):  # CyclicLR was added in PyTorch 1.1.0
            super(CyclicLR, self).__init__(lambda opt:
                                                    torch.optim.lr_scheduler.CyclicLR(
                                                        opt, base_lr, max_lr, step_size_up=step_size_up,
                                                        step_size_down=step_size_down, mode=mode, gamma=gamma,
                                                        scale_fn=scale_fn, scale_mode=scale_mode,
                                                        cycle_momentum=cycle_momentum, base_momentum=base_momentum,
                                                        max_momentum=max_momentum, last_epoch=last_epoch),
                                           monitor=monitor, step_on_batch=step_on_batch)
        else:
            raise NotImplementedError('CyclicLR scheduler was not implemented in PyTorch versions less than 1.1.0. '
                                      'Update PyTorch or use the CyclicLR callback from an older Torchbearer version.') 
Example #4
Source File: misc.py    From Pytorch-Project-Template with MIT License
def print_cuda_statistics():
    logger = logging.getLogger("Cuda Statistics")
    import sys
    from subprocess import call
    import torch
    logger.info('__Python VERSION:  {}'.format(sys.version))
    logger.info('__pyTorch VERSION:  {}'.format(torch.__version__))
    logger.info('__CUDA VERSION')
    call(["nvcc", "--version"])
    logger.info('__CUDNN VERSION:  {}'.format(torch.backends.cudnn.version()))
    logger.info('__Number CUDA Devices:  {}'.format(torch.cuda.device_count()))
    logger.info('__Devices')
    call(["nvidia-smi", "--format=csv",
          "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
    logger.info('Active CUDA Device: GPU {}'.format(torch.cuda.current_device()))
    logger.info('Available devices  {}'.format(torch.cuda.device_count()))
    logger.info('Current cuda device  {}'.format(torch.cuda.current_device())) 
Example #5
Source File: hooks.py    From mmdetection with Apache License 2.0
def patch_norm_fp32(module):
    """Recursively convert normalization layers from FP16 to FP32.

    Args:
        module (nn.Module): The module to convert, assumed to be running in FP16.

    Returns:
        nn.Module: The converted module, with its normalization layers cast
            to FP32.
    """
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()
        if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
            module.forward = patch_forward_method(module.forward, torch.half,
                                                  torch.float)
    for child in module.children():
        patch_norm_fp32(child)
    return module 
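patch_forward_method is defined elsewhere in mmdetection; a minimal hypothetical sketch of what such a wrapper might look like, assuming a single tensor output:

import torch

def patch_forward_method(func, src_type, dst_type):
    # hypothetical sketch: cast tensor inputs from src_type to dst_type,
    # run the wrapped forward, then cast its output back to src_type
    def new_forward(*args, **kwargs):
        args = tuple(a.to(dst_type) if torch.is_tensor(a) and a.dtype == src_type else a
                     for a in args)
        return func(*args, **kwargs).to(src_type)
    return new_forward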
Example #6
Source File: test_torch_scheduler.py    From torchbearer with MIT License
def test_lambda_lr(self):
        from distutils.version import LooseVersion
        import torch
        version = torch.__version__ if isinstance(torch.__version__, str) else "0.4.0"
        if LooseVersion(version) >= LooseVersion("1.1.0"):  # CyclicLR was added in PyTorch 1.1.0
            with patch('torch.optim.lr_scheduler.CyclicLR') as lr_mock:
                state = {torchbearer.OPTIMIZER: 'optimizer'}

                scheduler = CyclicLR(0.01, 0.1, monitor='test', step_size_up=200, step_size_down=None, mode='triangular',
                         gamma=2., scale_fn=None, scale_mode='cycle', cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
                         last_epoch=-1, step_on_batch='batch')
                scheduler.on_start(state)

                lr_mock.assert_called_once_with('optimizer', 0.01, 0.1, step_size_up=200, step_size_down=None, mode='triangular',
                         gamma=2., scale_fn=None, scale_mode='cycle', cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
                         last_epoch=-1)
                self.assertTrue(scheduler._step_on_batch == 'batch')
                self.assertTrue(scheduler._monitor == 'test')
        else:
            self.assertRaises(NotImplementedError, lambda: CyclicLR(0.01, 0.1, monitor='test', step_size_up=200, step_size_down=None, mode='triangular',
                         gamma=2., scale_fn=None, scale_mode='cycle', cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
                         last_epoch=-1, step_on_batch='batch')) 
Example #7
Source File: losses.py    From DepthAwareCNN with MIT License
def cross_entropy2d(input, target, weight=None, size_average=True):
    # input: (n, c, h, w), target: (n, h, w)
    n, c, h, w = input.size()
    # log_p: (n, c, h, w)
    if LooseVersion(torch.__version__) < LooseVersion('0.3'):
        # ==0.2.X
        log_p = F.log_softmax(input).cuda()
    else:
        # >=0.3
        log_p = F.log_softmax(input, dim=1).cuda()
    # log_p: (n*h*w, c)
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
    log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
    log_p = log_p.view(-1, c)
    # target: (n*h*w,)
    # mask = (target != 255)
    # target = target[mask]
    loss = F.nll_loss(log_p, target, weight=weight, size_average=False, ignore_index=255).cuda()
    if size_average:
        loss /= (n*h*w)
    return loss 
Example #8
Source File: models.py    From CSGNet with MIT License
def __init__(self, unique_draws: List, stack_size: int, steps: int,
                 canvas_shape: List):
        """
        This class parses the complete output of the network, which is
        produced in a joint fashion. It can be used to generate the final
        canvas and expressions.
        :param unique_draws: Unique draw/op operations in the current dataset
        :param stack_size: Stack size
        :param steps: Number of steps in the program
        :param canvas_shape: Shape of the canvases
        """
        self.canvas_shape = canvas_shape
        self.stack_size = stack_size
        self.steps = steps
        self.Parser = Parser()
        self.sim = SimulateStack(self.stack_size, self.canvas_shape)
        self.unique_draws = unique_draws
        self.pytorch_version = torch.__version__[2] 
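Note that torch.__version__[2] picks out a single character of the version string ('4' for '0.4.0'), which breaks for two-digit minors such as '1.10.0'. A sketch of a numeric extraction instead:

import torch

# parse the minor version numerically rather than by character position
minor_version = int(torch.__version__.split('.')[1])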
Example #9
Source File: base_trainer.py    From fastMRI with MIT License
def initial_setup(self, args):
        ############
        logging.info(f"run pid: {os.getpid()} parent: {os.getppid()}")
        logging.info("#########")
        logging.info(args.__dict__)
        logging.info(f"Rank: {args.rank} World_size: {args.world_size}, Run {args.run_name}")

        args.cuda = torch.cuda.is_available()
        logging.info(f"Pytorch version: {torch.__version__}")
        logging.info("Using CUDA: {} CUDA AVAIL: {} #DEVICES: {} VERSION: {}".format(
            args.cuda, torch.cuda.is_available(), torch.cuda.device_count(),
            torch.version.cuda))
        if not args.cuda:
            self.device = 'cpu'
        else:
            self.device = 'cuda'
            cudnn.benchmark = True
            cudnn.enabled = True

        random.seed(args.seed) # The seed needs to be constant between processes.
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed) 
Example #10
Source File: wrappers.py    From mmcv with Apache License 2.0
def forward(self, x):
        if x.numel() == 0 and torch.__version__ <= '1.4':
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
                                     self.padding, self.stride, self.dilation):
                o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x) 
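The loop above applies the standard convolution output-size formula, o = (i + 2p - (d(k - 1) + 1)) // s + 1. A quick sanity check of that formula against PyTorch itself:

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=1, dilation=1)
x = torch.randn(1, 3, 17, 17)
i, k, p, s, d = 17, 3, 1, 2, 1
o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
assert conv(x).shape[-1] == o  # both give 9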
Example #11
Source File: wrappers.py    From mmcv with Apache License 2.0
def forward(self, x):
        if x.numel() == 0 and torch.__version__ <= '1.4.0':
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super(ConvTranspose2d, self).forward(x) 
Example #12
Source File: __init__.py    From MOTDT with MIT License
def extract_reid_features(reid_model, image, tlbrs):
    if len(tlbrs) == 0:
        return torch.FloatTensor()

    patches = extract_image_patches(image, tlbrs)
    patches = np.asarray([im_preprocess(cv2.resize(p, reid_model.inp_size)) for p in patches], dtype=np.float32)

    gpu = net_utils.get_device(reid_model)
    if LooseVersion(torch.__version__) > LooseVersion('0.3.1'):
        with torch.no_grad():
            im_var = Variable(torch.from_numpy(patches))
            if gpu is not None:
                im_var = im_var.cuda(gpu)
            features = reid_model(im_var).data
    else:
        im_var = Variable(torch.from_numpy(patches), volatile=True)
        if gpu is not None:
            im_var = im_var.cuda(gpu)
        features = reid_model(im_var).data

    return features 
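On PyTorch >= 0.4 the Variable wrapper is a no-op and the volatile flag was removed in favour of torch.no_grad(), so the version gate above collapses to a single branch. A minimal sketch, reusing the names from the example:

with torch.no_grad():
    im_var = torch.from_numpy(patches)
    if gpu is not None:
        im_var = im_var.cuda(gpu)
    features = reid_model(im_var)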
Example #13
Source File: wrappers.py    From mmdetection with Apache License 2.0
def forward(self, x):
        if x.numel() == 0 and torch.__version__ <= '1.4':
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
                                     self.padding, self.stride, self.dilation):
                o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
                out_shape.append(o)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super().forward(x) 
Example #14
Source File: run.py    From mead-baseline with Apache License 2.0
def run_model(si, config_params, logs, settings, datasets, embeddings, task_name, dir_, gpu):
    """Run a model and collect system information."""
    os.chdir(dir_)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    config_params['reporting'] = {}
    with suppress_output():
        task = mead.Task.get_task_specific(task_name, logs, settings)
        task.read_config(config_params, datasets)
        task.initialize(embeddings)
        task.train()

        si['framework_version'] = get_framework_version(config_params['backend'])
        si['cuda'], si['cudnn'] = get_cuda_version(config_params['backend'])
        si['gpu_name'], si['gpu_mem'] = get_gpu_info(gpu)
        si['cpu_name'], si['cpu_mem'], si['cpu_cores'] = get_cpu_info()
        si['python'] = get_python_version()
        si['baseline'] = version_str_to_tuple(baseline.__version__) 
Example #15
Source File: config.py    From pcl.pytorch with MIT License
def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all cfg
    values that are necessary (e.g., merging a config from a file, merging
    command line config options, etc.). By default, this function will also
    mark the global cfg as immutable to prevent changing the global cfg settings
    during script execution (which can lead to hard-to-debug errors or code
    that's harder to understand than is necessary).
    """
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.VGG.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pertrained resnets."
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # create alias for PyTorch version less than 0.4.0
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True) 
Example #16
Source File: face_id_mlp_net.py    From One_Shot_Face_Reenactment with MIT License
def forward(self, input, label):
        # --------------------------- cos(theta) & phi(theta) ---------------------------
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
        phi = cosine * self.cos_m - sine * self.sin_m
        if self.easy_margin:
            phi = torch.where(cosine > 0, phi, cosine)
        else:
            phi = torch.where(cosine > self.th, phi, cosine - self.mm)
        # --------------------------- convert label to one-hot ---------------------------
        # one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')
        one_hot = torch.zeros(cosine.size(), device='cuda')
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        # ------------- torch.where: out_i = x_i if condition_i else y_i -------------
        # you can use torch.where if your torch.__version__ is >= 0.4
        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
        output *= self.s
        # print(output)

        return output 
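For reference, the phi computation above is the angle-addition identity cos(theta + m) = cos(theta) * cos(m) - sin(theta) * sin(m), i.e. the target logit with an additive angular margin m (ArcFace-style); self.th and self.mm guard the case where theta + m would exceed pi.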
Example #17
Source File: layers.py    From ChaLearn_liveness_challenge with MIT License
def forward(self, input, label=None):
        if self.training:
            # --------------------------- cos(theta) & phi(theta) ---------------------------
            cosine = F.linear(F.normalize(input), F.normalize(self.weight))
            sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
            phi = cosine * self.cos_m - sine * self.sin_m
            if self.easy_margin:
                phi = torch.where(cosine > 0, phi, cosine)
            else:
                phi = torch.where(cosine > self.th, phi, cosine - self.mm)
            # --------------------------- convert label to one-hot ---------------------------
            # one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')
            one_hot = torch.zeros(cosine.size(), device='cuda')
            one_hot.scatter_(1, label.view(-1, 1).long(), 1)
            # ------------- torch.where: out_i = x_i if condition_i else y_i -------------
            output = (one_hot * phi) + ((1.0 - one_hot) * cosine)  # you can use torch.where if your torch.__version__ is >= 0.4
            output *= self.s
        else:
            cosine = F.linear(F.normalize(input), F.normalize(self.weight))
            output = self.s * cosine

        return output 
Example #18
Source File: layers.py    From ChaLearn_liveness_challenge with MIT License
def forward(self, input, label=None):
        if self.training:
            # --------------------------- cos(theta) & phi(theta) ---------------------------
            cosine = F.linear(F.normalize(input), F.normalize(self.weight))
            sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
            phi = cosine * self.cos_m - sine * self.sin_m
            if self.easy_margin:
                phi = torch.where(cosine > 0, phi, cosine)
            else:
                phi = torch.where(cosine > self.th, phi, cosine - self.mm)
            # --------------------------- convert label to one-hot ---------------------------
            # one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')
            one_hot = torch.zeros(cosine.size(), device='cuda')
            one_hot.scatter_(1, label.view(-1, 1).long(), 1)
            # ------------- torch.where: out_i = x_i if condition_i else y_i -------------
            output = (one_hot * phi) + ((1.0 - one_hot) * cosine)  # you can use torch.where if your torch.__version__ is >= 0.4
            output *= self.s
        else:
            cosine = F.linear(F.normalize(input), F.normalize(self.weight))
            output = self.s * cosine

        return output 
Example #19
Source File: head.py    From lumin with Apache License 2.0
def _build_lookup(self) -> None:
        r'''
        Builds lookup-tables necessary to map flattened data to correct locations for reshaping into a matrix.
        Also handles missing data, i.e. elements in the matrix which do not exist in the flattened data
        '''

        shp = (self.n_v,self.n_fpv) if self.row_wise else (self.n_fpv,self.n_v)
        lookup  = torch.zeros(shp, dtype=torch.long)
        missing = torch.zeros(shp, dtype=torch.bool if LooseVersion(torch.__version__) >= LooseVersion("1.2") else torch.uint8)
        if self.row_wise:
            for i, v in enumerate(self.vecs):
                for j, c in enumerate(self.fpv):
                    f = f'{v}_{c}'
                    if f in self.cont_feats: lookup[i,j]  = self.cont_feats.index(f)
                    else:                    missing[i,j] = 1
        else:
            for j, v in enumerate(self.vecs):
                for i, c in enumerate(self.fpv):
                    f = f'{v}_{c}'
                    if f in self.cont_feats: lookup[i,j]  = self.cont_feats.index(f)
                    else:                    missing[i,j] = 1
        self.missing,self.lookup = to_device(missing.flatten()),to_device(lookup.flatten()) 
Example #20
Source File: fonduer_model.py    From fonduer with MIT License
def _get_default_conda_env() -> Optional[Dict[str, Any]]:
    """Get default Conda environment.

    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import torch
    import fonduer

    return _mlflow_conda_env(
        additional_conda_deps=[
            "pytorch={}".format(torch.__version__),  # type: ignore
            "psycopg2",
            "pip",
        ],
        additional_pip_deps=["fonduer=={}".format(fonduer.__version__)],
        additional_conda_channels=["pytorch"],
    ) 
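One caveat: on CUDA builds torch.__version__ may carry a local suffix such as '1.5.0+cu101', which is not a valid conda version spec. A sketch that strips the suffix first (an assumption about the suffix format, not part of the original code):

import torch

# drop a '+cu...' local-version suffix before pinning, e.g. '1.5.0+cu101' -> '1.5.0'
torch_version = torch.__version__.split('+')[0]
pin = "pytorch={}".format(torch_version)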
Example #21
Source File: classifier.py    From MOTDT with MIT License
def update(self, image):
        im_croped, im_pad, real_shape, im_scale = self.im_preprocess(image)

        self.im_scale = im_scale
        self.ori_image_shape = image.shape
        im_data = torch.from_numpy(im_croped).permute(2, 0, 1)
        im_data = im_data.unsqueeze(0)

        # forward
        if LooseVersion(torch.__version__) > LooseVersion('0.3.1'):
            with torch.no_grad():
                im_var = Variable(im_data).cuda(self.gpu)
                self.score_map = self.model(im_var)
        else:
            im_var = Variable(im_data, volatile=True).cuda(self.gpu)
            self.score_map = self.model(im_var)

        return real_shape, im_scale 
Example #22
Source File: lr_schedulers.py    From argus with MIT License
def __init__(self,
                 max_lr,
                 total_steps=None,
                 epochs=None,
                 steps_per_epoch=None,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 cycle_momentum=True,
                 base_momentum=0.85,
                 max_momentum=0.95,
                 div_factor=25.,
                 final_div_factor=1e4):
        from distutils.version import LooseVersion
        if LooseVersion(torch.__version__) >= LooseVersion("1.3.0"):
            super().__init__(
                lambda opt: _scheduler.OneCycleLR(opt,
                                                  max_lr,
                                                  total_steps=total_steps,
                                                  epochs=epochs,
                                                  steps_per_epoch=steps_per_epoch,
                                                  pct_start=pct_start,
                                                  anneal_strategy=anneal_strategy,
                                                  cycle_momentum=cycle_momentum,
                                                  base_momentum=base_momentum,
                                                  max_momentum=max_momentum,
                                                  div_factor=div_factor,
                                                  final_div_factor=final_div_factor),
                step_on_iteration=True
            )
        else:
            raise ImportError("Update torch>=1.3.0 to use 'OneCycleLR'") 
Example #23
Source File: face_id_mlp_net.py    From One_Shot_Face_Reenactment with MIT License
def forward(self, input, label):
        # --------------------------- cos(theta) & phi(theta) ---------------------------
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        phi = cosine - self.m
        # --------------------------- convert label to one-hot ---------------------------
        one_hot = torch.zeros(cosine.size(), device='cuda')
        # one_hot = one_hot.cuda() if cosine.is_cuda else one_hot
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        # ------------- torch.where: out_i = x_i if condition_i else y_i -------------
        # you can use torch.where if your torch.__version__ is >= 0.4
        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
        output *= self.s
        # print(output)

        return output 
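Unlike the angular margin in Example #16, phi = cosine - self.m applies an additive cosine margin (CosFace/AM-Softmax style), so no trigonometric identity or boundary guard is needed.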
Example #24
Source File: model.py    From c3dpo_nrsfm with MIT License
def pytorch_ge12():
    # compare (major, minor) numerically; joining the first two components
    # into a float breaks for two-digit minors, e.g. '1.10' -> 1.1 < 1.2
    major, minor = (int(x) for x in torch.__version__.split('.')[:2])
    return (major, minor) >= (1, 2)
Example #25
Source File: test_net.py    From skorch with BSD 3-Clause "New" or "Revised" License
def test_repr_fitted_works(self, net_cls, module_cls, data):
        X, y = data
        net = net_cls(
            module_cls,
            module__hidden_units=11,
            module__nonlin=nn.PReLU(),
        )
        net.fit(X[:50], y[:50])
        result = net.__repr__()
        expected = """<class 'skorch.classifier.NeuralNetClassifier'>[initialized](
  module_=MLPModule(
    (nonlin): PReLU(num_parameters=1)
    (output_nonlin): Softmax()
    (sequential): Sequential(
      (0): Linear(in_features=20, out_features=11, bias=True)
      (1): PReLU(num_parameters=1)
      (2): Dropout(p=0.5)
      (3): Linear(in_features=11, out_features=11, bias=True)
      (4): PReLU(num_parameters=1)
      (5): Dropout(p=0.5)
      (6): Linear(in_features=11, out_features=2, bias=True)
      (7): Softmax()
    )
  ),
)"""
        if LooseVersion(torch.__version__) >= '1.2':
            expected = expected.replace("Softmax()", "Softmax(dim=-1)")
            expected = expected.replace("Dropout(p=0.5)",
                                        "Dropout(p=0.5, inplace=False)")
        assert result == expected 
Example #26
Source File: Criteria.py    From ext_portrait_segmentation with MIT License
def __init__(self, weight=None, ignore=None):
        '''
        :param weight: 1D weight vector to deal with the class-imbalance
        '''

        super().__init__()
        from distutils.version import LooseVersion
        # NLLLoss2d was merged into NLLLoss in PyTorch 0.4; parse the version
        # instead of indexing a single character of the version string
        if LooseVersion(torch.__version__) < LooseVersion('0.4'):
            self.loss = nn.NLLLoss2d(weight, ignore_index=ignore)
        else:
            self.loss = nn.NLLLoss(weight, ignore_index=ignore)
Example #27
Source File: test_net.py    From skorch with BSD 3-Clause "New" or "Revised" License
def test_repr_initialized_works(self, net_cls, module_cls):
        net = net_cls(
            module_cls,
            module__hidden_units=42,
        )
        net.initialize()
        result = net.__repr__()
        expected = """<class 'skorch.classifier.NeuralNetClassifier'>[initialized](
  module_=MLPModule(
    (nonlin): ReLU()
    (output_nonlin): Softmax()
    (sequential): Sequential(
      (0): Linear(in_features=20, out_features=42, bias=True)
      (1): ReLU()
      (2): Dropout(p=0.5)
      (3): Linear(in_features=42, out_features=42, bias=True)
      (4): ReLU()
      (5): Dropout(p=0.5)
      (6): Linear(in_features=42, out_features=2, bias=True)
      (7): Softmax()
    )
  ),
)"""
        if LooseVersion(torch.__version__) >= '1.2':
            expected = expected.replace("Softmax()", "Softmax(dim=-1)")
            expected = expected.replace("Dropout(p=0.5)",
                                        "Dropout(p=0.5, inplace=False)")
        assert result == expected 
Example #28
Source File: setup.py    From L3C-PyTorch with GNU General Public License v3.0
def _assert_torch_version_sufficient():
    import torch
    if LooseVersion(torch.__version__) >= LooseVersion('1.0'):
        return
    print(_bold_warn_str('Error:'), 'Need PyTorch version >= 1.0, found {}'.format(torch.__version__))
    sys.exit(1) 
Example #29
Source File: sys_utils.py    From VASNet with MIT License
def ge_pkg_versions():
    dep_versions = {}
    dep_versions['display'] = run_command('cat /proc/driver/nvidia/version')

    dep_versions['cuda'] = 'NA'
    cuda_home = '/usr/local/cuda/'
    if 'CUDA_HOME' in os.environ:
        cuda_home = os.environ['CUDA_HOME']

    cmd = cuda_home+'/version.txt'
    if os.path.isfile(cmd):
        dep_versions['cuda'] = run_command('cat '+cmd)

    dep_versions['cudnn'] = torch.backends.cudnn.version()
    dep_versions['platform'] = platform.platform()
    dep_versions['python'] = sys.version_info[:3]
    dep_versions['torch'] = torch.__version__
    dep_versions['numpy'] = np.__version__
    dep_versions['h5py'] = h5py.__version__
    dep_versions['json'] = json.__version__
    dep_versions['ortools'] = ortools.__version__
    dep_versions['torchvision'] = pkg_resources.get_distribution("torchvision").version

    # dep_versions['PIL'] = Image.VERSION
    # dep_versions['OpenCV'] = 'NA'
    # if 'cv2' in sys.modules:
    #     dep_versions['OpenCV'] = cv2.__version__


    return dep_versions 
Example #30
Source File: config.py    From FPN-Pytorch with MIT License
def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all cfg
    values that are necessary (e.g., merging a config from a file, merging
    command line config options, etc.). By default, this function will also
    mark the global cfg as immutable to prevent changing the global cfg settings
    during script execution (which can lead to hard-to-debug errors or code
    that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pertrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # create alias for PyTorch version less than 0.4.0
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)